author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 17:27:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 17:27:40 -0400
commit	f9da455b93f6ba076935b4ef4589f61e529ae046 (patch)
tree	3c4e69ce1ba1d6bf65915b97a76ca2172105b278 /drivers/net/ethernet
parent	0e04c641b199435f3779454055f6a7de258ecdfc (diff)
parent	e5eca6d41f53db48edd8cf88a3f59d2c30227f8e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov.

 2) Multiqueue support in xen-netback and xen-netfront, from Andrew J Benniston.

 3) Allow tweaking of aggregation settings in cdc_ncm driver, from Bjørn Mork.

 4) BPF now has a "random" opcode, from Chema Gonzalez.

 5) Add more BPF documentation and improve test framework, from Daniel Borkmann.

 6) Support TCP fastopen over ipv6, from Daniel Lee.

 7) Add software TSO helper functions and use them to support software TSO in mvneta and mv643xx_eth drivers. From Ezequiel Garcia.

 8) Support software TSO in fec driver too, from Nimrod Andy.

 9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli.

10) Handle broadcasts more gracefully over macvlan when there are large numbers of interfaces configured, from Herbert Xu.

11) Allow more control over fwmark used for non-socket based responses, from Lorenzo Colitti.

12) Do TCP congestion window limiting based upon measurements, from Neal Cardwell.

13) Support busy polling in SCTP, from Neal Horman.

14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru.

15) Bridge promisc mode handling improvements from Vlad Yasevich.

16) Don't use inetpeer entries to implement ID generation any more, it performs poorly, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
  rtnetlink: fix userspace API breakage for iproute2 < v3.9.0
  tcp: fixing TLP's FIN recovery
  net: fec: Add software TSO support
  net: fec: Add Scatter/gather support
  net: fec: Increase buffer descriptor entry number
  net: fec: Factorize feature setting
  net: fec: Enable IP header hardware checksum
  net: fec: Factorize the .xmit transmit function
  bridge: fix compile error when compiling without IPv6 support
  bridge: fix smatch warning / potential null pointer dereference
  via-rhine: fix full-duplex with autoneg disable
  bnx2x: Enlarge the dorq threshold for VFs
  bnx2x: Check for UNDI in uncommon branch
  bnx2x: Fix 1G-baseT link
  bnx2x: Fix link for KR with swapped polarity lane
  sctp: Fix sk_ack_backlog wrap-around problem
  net/core: Add VF link state control policy
  net/fsl: xgmac_mdio is dependent on OF_MDIO
  net/fsl: Make xgmac_mdio read error message useful
  net_sched: drr: warn when qdisc is not work conserving
  ...
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 4
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 2
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdma.c | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/amd/Makefile | 1
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r-- drivers/net/ethernet/amd/ariadne.c | 3
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r-- drivers/net/ethernet/amd/hplance.c | 4
-rw-r--r-- drivers/net/ethernet/amd/mvme147.c | 6
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/Makefile | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1007
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 375
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 556
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2182
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1351
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 510
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 512
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 433
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 676
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 49
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 1654
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 678
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 36
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 49
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 104
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 263
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 32
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.h | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 67
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 323
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_cq.h | 9
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.h | 4
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 66
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 6
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 20
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 15
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 610
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 85
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 194
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 581
-rw-r--r-- drivers/net/ethernet/ethoc.c | 6
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 13
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 661
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 22
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/xgmac_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 27
-rw-r--r-- drivers/net/ethernet/hisilicon/Makefile | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 1066
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 5
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r-- drivers/net/ethernet/icplus/ipg.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 41
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 62
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.h | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 88
-rw-r--r-- drivers/net/ethernet/intel/e1000e/nvm.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/param.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 35
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 60
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 137
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 141
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 21
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_diag.c | 50
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 311
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.c | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1124
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 37
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 131
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 18
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 53
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 209
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/Makefile | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 61
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 164
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 13
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_register.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_status.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 84
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 23
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 67
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 423
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 185
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 62
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 121
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.h | 72
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 100
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_hw.h | 96
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.c | 134
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 56
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.c | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.c | 48
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.h | 49
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.c | 66
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.h | 48
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_regs.h | 58
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 49
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 151
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_hwmon.c | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 203
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 60
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 89
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 85
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 80
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 31
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 24
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 28
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 356
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 68
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 164
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 66
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 62
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 376
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 324
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 109
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cq.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_main.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 147
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 278
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/profile.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/reset.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 124
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8695net.c | 6
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 87
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60.c | 2
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/ns83820.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 15
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.c | 22
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 16
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 8
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/packetengines/hamachi.c | 6
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 36
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 44
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 42
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 92
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 69
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 192
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 171
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 68
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 47
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 50
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/io.h | 7
-rw-r--r-- drivers/net/ethernet/sfc/siena_sriov.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 22
-rw-r--r-- drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 17
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 14
-rw-r--r-- drivers/net/ethernet/ti/cpsw-phy-sel.c | 62
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 114
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 11
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 39
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 9
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 50
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 13
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2
-rw-r--r-- drivers/net/ethernet/via/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 511
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
326 files changed, 19853 insertions, 5292 deletions
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 35df0b9e6848..a968654b631d 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
 	/* The EL3-specific entries in the device structure. */
 	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 
 	err = register_netdev(dev);
 	if (err) {
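The SET_ETHTOOL_OPS() conversions repeated throughout this merge are mechanical: the macro was only ever a thin wrapper around a pointer assignment, so drivers now assign net_device->ethtool_ops directly. A minimal sketch of the equivalence, assuming the macro's historical one-line definition in netdevice.h:

/* The old convenience macro was, in essence:
 *   #define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
 * so the two forms below behave identically, and the macro could be
 * retired tree-wide in favor of the explicit assignment.
 */
SET_ETHTOOL_OPS(dev, &ethtool_ops);	/* old style */
dev->ethtool_ops = &ethtool_ops;	/* new style used in this merge */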
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 063557e037f2..f18647c23559 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
 	dev->netdev_ops = &el3_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+	dev->ethtool_ops = &netdev_ethtool_ops;
 
 	return tc589_config(link);
 }
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 465cc7108d8a..e13b04624ded 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-	SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+	dev->ethtool_ops = &typhoon_ethtool_ops;
 
 	/* We can handle scatter gather, up to 16 entries, and
 	 * we can do IP checksumming (only version 4, doh...)
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 455d4c399b52..1d162ccb4733 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
 
 	/* This check _should_not_ be necessary, omit eventually. */
 	while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
-		if (jiffies - reset_start_time > 2 * HZ / 100) {
+		if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
 			netdev_warn(dev, "%s: did not complete.\n", __func__);
 			break;
 		}
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
 	dma_start = jiffies;
 
 	while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
-		if (jiffies - dma_start > 2 * HZ / 100) {	/* 20ms */
+		if (time_after(jiffies, dma_start + 2 * HZ / 100)) {	/* 20ms */
 			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
 			ax_reset_8390(dev);
 			ax_NS8390_init(dev, 1);
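Both ax88796 hunks swap open-coded jiffies arithmetic for time_after() from <linux/jiffies.h>, which stays correct when the jiffies counter wraps around. A minimal sketch of the idiom; the helper below is illustrative, not code from the driver:

/* time_after(a, b) is essentially ((long)((b) - (a)) < 0): comparing
 * the signed difference keeps the test valid across a jiffies wrap,
 * where naive comparisons of the raw counter values can invert.
 */
static bool example_timed_out(unsigned long start)
{
	return time_after(jiffies, start + 2 * HZ / 100);	/* ~20 ms */
}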
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 051349458462..edb718661850 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -68,6 +68,7 @@ source "drivers/net/ethernet/neterion/Kconfig"
 source "drivers/net/ethernet/faraday/Kconfig"
 source "drivers/net/ethernet/freescale/Kconfig"
 source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/hisilicon/Kconfig"
 source "drivers/net/ethernet/hp/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 35190e36c456..58de3339ab3c 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
 obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
 obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
 obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
 obj-$(CONFIG_NET_VENDOR_HP) += hp/
 obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 171d73c1d3c2..40dbbf740331 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
 
 	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 
 	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
 
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 1517e9df5ba1..9a6991be9749 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
 	dev->watchdog_timeo = 5*HZ;
 
 	dev->netdev_ops = &ace_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+	dev->ethtool_ops = &ace_ethtool_ops;
 
 	/* we only display this string ONCE */
 	if (!boards_found)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 99cc56f451cf..580553d42d34 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -353,7 +353,6 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 
 	struct sgdma_descrip __iomem *cdesc = &descbase[0];
 	struct sgdma_descrip __iomem *ndesc = &descbase[1];
-
 	struct tse_buffer *rxbuffer = NULL;
 
 	if (!sgdma_rxbusy(priv)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 54c25eff7952..be72e1e64525 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -271,5 +271,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
 
 void altera_tse_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+	netdev->ethtool_ops = &tse_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 562df46e0a82..bbaf36d9f5e1 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
 	default y
 	depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
 		   SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
-		   (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
+		   (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
 	---help---
 	  If you have a network (Ethernet) chipset belonging to this class,
 	  say Y.
@@ -177,4 +177,16 @@ config SUNLANCE
 	  To compile this driver as a module, choose M here: the module
 	  will be called sunlance.
 
+config AMD_XGBE
+	tristate "AMD 10GbE Ethernet driver"
+	depends on OF_NET
+	select PHYLIB
+	select AMD_XGBE_PHY
+	---help---
+	  This driver supports the AMD 10GbE Ethernet device found on an
+	  AMD SoC.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called amd-xgbe.
+
 endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index cdd4301a973d..a38a2dce3eb3 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NI65) += ni65.o
 obj-$(CONFIG_PCNET32) += pcnet32.o
 obj-$(CONFIG_SUN3LANCE) += sun3lance.o
 obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 26efaaa5e73f..068dc7cad5fa 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
 	/* Initialize driver entry points */
 	dev->netdev_ops = &amd8111e_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &ops);
+	dev->ethtool_ops = &ops;
 	dev->irq =pdev->irq;
 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
 	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index b08101b31b8b..968b7bfac8fc 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
 	unsigned long mem_start = board + ARIADNE_RAM;
 	struct resource *r1, *r2;
 	struct net_device *dev;
-	struct ariadne_private *priv;
 	u32 serial;
 	int err;
 
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
 		return -ENOMEM;
 	}
 
-	priv = netdev_priv(dev);
-
 	r1->name = dev->name;
 	r2->name = dev->name;
 
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index a2bd91e3d302..a78e4c136959 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
 	dev->base_addr = base->start;
 	dev->irq = irq;
 	dev->netdev_ops = &au1000_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+	dev->ethtool_ops = &au1000_ethtool_ops;
 	dev->watchdog_timeo = ETH_TX_TIMEOUT;
 
 	/*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 47ce57c2c893..6c9de117ffc6 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -27,9 +27,9 @@
 
 #include "hplance.h"
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 16392 bytes of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3
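The corrected numbers check out against the defines that follow: LANCE_LOG_RX_BUFFERS of 3 gives 2^3 = 8 receive buffers and LANCE_LOG_TX_BUFFERS of 1 gives 2^1 = 2 transmit buffers, so the buffers need (8 + 2) * 1544 = 15440 bytes, which fits in the 16392 bytes of RAM with room left for the init block.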
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0e8399dec054..0660ac5846bb 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -26,9 +26,9 @@
 #include <asm/pgtable.h>
 #include <asm/mvme147hw.h>
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 32K of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
 		dev->dev_addr);
 
 	lp = netdev_priv(dev);
-	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 16K */
+	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 32K */
 	if (!lp->ram) {
 		printk("%s: No memory for LANCE buffers\n", dev->name);
 		free_netdev(dev);
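The new '32K' label matches the allocation: __get_dma_pages(GFP_ATOMIC, 3) returns 2^3 = 8 contiguous pages, and with the 4 KiB pages used here that is 8 * 4096 = 32768 bytes, so the old '16K' comment never matched the code.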
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 08569fe2b182..abf3b1581c82 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
 	lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
 
 	dev->netdev_ops = &mace_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+	dev->ethtool_ops = &netdev_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	return nmclan_config(link);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 000000000000..26cf9af1642f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
+
+amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+		 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+
+amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 000000000000..bf462ee86f5c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1007 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto. Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto. Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_COMMON_H__
+#define __XGBE_COMMON_H__
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define DMA_DSR2 0x3028
+#define DMA_DSR3 0x302c
+#define DMA_DSR4 0x3030
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_TDC_INDEX 24
+#define DMA_AXIAWCR_TDC_WIDTH 4
+#define DMA_AXIAWCR_TDD_INDEX 28
+#define DMA_AXIAWCR_TDD_WIDTH 2
+#define DMA_DSR0_RPS_INDEX 8
+#define DMA_DSR0_RPS_WIDTH 4
+#define DMA_DSR0_TPS_INDEX 12
+#define DMA_DSR0_TPS_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
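The _INDEX/_WIDTH pairs above describe bit-fields within a register: an _INDEX is the field's starting bit and a _WIDTH its size in bits. The driver's real accessor macros live in xgbe.h (not shown in this diff); the helpers below are an illustrative sketch of the shift-and-mask pattern they implement, not the driver's actual code:

/* Read a field described by an (index, width) pair out of a register
 * value, and write one back, using plain shift-and-mask arithmetic.
 */
static inline u32 example_get_bits(u32 reg, unsigned int index,
				   unsigned int width)
{
	return (reg >> index) & ((1U << width) - 1);
}

static inline u32 example_set_bits(u32 reg, unsigned int index,
				   unsigned int width, u32 val)
{
	u32 mask = ((1U << width) - 1) << index;

	return (reg & ~mask) | ((val << index) & mask);
}

/* e.g. testing DMA_ISR's MAC interrupt flag:
 *   example_get_bits(isr, DMA_ISR_MACIS_INDEX, DMA_ISR_MACIS_WIDTH)
 */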
+/* DMA channel register offsets
+ *   Multiple channels can be active.  The first channel has registers
+ *   that begin at 0x3100.  Each subsequent channel has registers that
+ *   are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
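A worked example of the channel layout described in the comment above; the helper is a sketch, not code from this driver:

/* Channel N's copy of a per-channel register sits at
 * DMA_CH_BASE + N * DMA_CH_INC + reg.  For instance, channel 0's
 * DMA_CH_SR is at 0x3100 + 0x60 = 0x3160, and channel 2's is at
 * 0x3100 + 2 * 0x80 + 0x60 = 0x3260.
 */
static inline unsigned int example_dma_ch_reg(unsigned int channel,
					      unsigned int reg)
{
	return DMA_CH_BASE + channel * DMA_CH_INC + reg;
}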
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 15
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 16
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+
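The '8 x N' annotations refer to the PBLX8 mode selected through DMA_CH_CR: when that bit is set, the hardware multiplies the PBL value programmed into DMA_CH_TCR/DMA_CH_RCR by 8, which is how burst lengths of 64, 128 and 256 can be expressed in a 6-bit PBL field. A hedged sketch of that relationship (the helper name is illustrative):

static inline unsigned int example_effective_pbl(unsigned int pbl, bool pblx8)
{
	return pblx8 ? pbl * 8 : pbl;	/* e.g. DMA_PBL_64 is programmed as 8 with x8 on */
}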
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_HTR1 0x0014
+#define MAC_HTR2 0x0018
+#define MAC_HTR3 0x001c
+#define MAC_HTR4 0x0020
+#define MAC_HTR5 0x0024
+#define MAC_HTR6 0x0028
+#define MAC_HTR7 0x002c
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
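The MACA{n}HR/MACA{n}LR pairs hold MAC addresses in the usual Synopsys layout: the low register carries the first four address bytes and the high register the last two, with an address-enable bit at the top (see MAC_MACA1HR_AE above). A sketch under that assumption, not verbatim from this driver:

static void example_set_mac_addr(void __iomem *base, const u8 *addr)
{
	iowrite32(addr[0] | (addr[1] << 8) | (addr[2] << 16) |
		  (addr[3] << 24), base + MAC_MACA0LR);
	iowrite32(addr[4] | (addr[5] << 8), base + MAC_MACA0HR);
}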
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
531/* MMC register entry bit positions and sizes */
532#define MMC_CR_CR_INDEX 0
533#define MMC_CR_CR_WIDTH 1
534#define MMC_CR_CSR_INDEX 1
535#define MMC_CR_CSR_WIDTH 1
536#define MMC_CR_ROR_INDEX 2
537#define MMC_CR_ROR_WIDTH 1
538#define MMC_CR_MCF_INDEX 3
539#define MMC_CR_MCF_WIDTH 1
540#define MMC_CR_MCT_INDEX 4
541#define MMC_CR_MCT_WIDTH 2
542#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
543#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
544#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
545#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
546#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
547#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
548#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
549#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
550#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
551#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
552#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
553#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
554#define MMC_RISR_RXCRCERROR_INDEX 5
555#define MMC_RISR_RXCRCERROR_WIDTH 1
556#define MMC_RISR_RXRUNTERROR_INDEX 6
557#define MMC_RISR_RXRUNTERROR_WIDTH 1
558#define MMC_RISR_RXJABBERERROR_INDEX 7
559#define MMC_RISR_RXJABBERERROR_WIDTH 1
560#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
561#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
562#define MMC_RISR_RXOVERSIZE_G_INDEX 9
563#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
564#define MMC_RISR_RX64OCTETS_GB_INDEX 10
565#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
566#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
567#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
568#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
569#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
570#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
571#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
572#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
573#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
574#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
575#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
576#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
577#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
578#define MMC_RISR_RXLENGTHERROR_INDEX 17
579#define MMC_RISR_RXLENGTHERROR_WIDTH 1
580#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
581#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
582#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
583#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
584#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
585#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
586#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
587#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
588#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
589#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
590#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
591#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
592#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
593#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
594#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
595#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
596#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
597#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
598#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
599#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
600#define MMC_TISR_TX64OCTETS_GB_INDEX 4
601#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
602#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
603#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
604#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
605#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
606#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
607#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
608#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
609#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
610#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
611#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
612#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
613#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
614#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
615#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
616#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
617#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
618#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
619#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
620#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
621#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
622#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
623#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
624#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
625#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
626#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
627#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
628
629/* MTL register offsets */
630#define MTL_OMR 0x1000
631#define MTL_FDCR 0x1008
632#define MTL_FDSR 0x100c
633#define MTL_FDDR 0x1010
634#define MTL_ISR 0x1020
635#define MTL_RQDCM0R 0x1030
636#define MTL_TCPM0R 0x1040
637#define MTL_TCPM1R 0x1044
638
639#define MTL_RQDCM_INC 4
640#define MTL_RQDCM_Q_PER_REG 4
641
642/* MTL register entry bit positions and sizes */
643#define MTL_OMR_ETSALG_INDEX 5
644#define MTL_OMR_ETSALG_WIDTH 2
645#define MTL_OMR_RAA_INDEX 2
646#define MTL_OMR_RAA_WIDTH 1
647
648/* MTL queue register offsets
649 * Multiple queues can be active. The first queue has registers
650 * that begin at 0x1100. Each subsequent queue has registers that
651 * are accessed using an offset of 0x80 from the previous queue.
652 */
653#define MTL_Q_BASE 0x1100
654#define MTL_Q_INC 0x80
655
656#define MTL_Q_TQOMR 0x00
657#define MTL_Q_TQUR 0x04
658#define MTL_Q_TQDR 0x08
659#define MTL_Q_TCECR 0x10
660#define MTL_Q_TCESR 0x14
661#define MTL_Q_TCQWR 0x18
662#define MTL_Q_RQOMR 0x40
663#define MTL_Q_RQMPOCR 0x44
664#define MTL_Q_RQDR 0x4c
665#define MTL_Q_IER 0x70
666#define MTL_Q_ISR 0x74
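
/* As a worked example of the addressing scheme described above
 * (illustrative only, not part of the register map itself), the
 * receive queue operation mode register of queue 2 is located at:
 *
 *   MTL_Q_BASE + (2 * MTL_Q_INC) + MTL_Q_RQOMR
 *     = 0x1100 + 0x100 + 0x40
 *     = 0x1240
 */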
667
668/* MTL queue register entry bit positions and sizes */
669#define MTL_Q_TCQWR_QW_INDEX 0
670#define MTL_Q_TCQWR_QW_WIDTH 21
671#define MTL_Q_RQOMR_EHFC_INDEX 7
672#define MTL_Q_RQOMR_EHFC_WIDTH 1
673#define MTL_Q_RQOMR_RFA_INDEX 8
674#define MTL_Q_RQOMR_RFA_WIDTH 3
675#define MTL_Q_RQOMR_RFD_INDEX 13
676#define MTL_Q_RQOMR_RFD_WIDTH 3
677#define MTL_Q_RQOMR_RQS_INDEX 16
678#define MTL_Q_RQOMR_RQS_WIDTH 9
679#define MTL_Q_RQOMR_RSF_INDEX 5
680#define MTL_Q_RQOMR_RSF_WIDTH 1
681#define MTL_Q_RQOMR_RTC_INDEX 0
682#define MTL_Q_RQOMR_RTC_WIDTH 2
683#define MTL_Q_TQOMR_FTQ_INDEX 0
684#define MTL_Q_TQOMR_FTQ_WIDTH 1
685#define MTL_Q_TQOMR_TQS_INDEX 16
686#define MTL_Q_TQOMR_TQS_WIDTH 10
687#define MTL_Q_TQOMR_TSF_INDEX 1
688#define MTL_Q_TQOMR_TSF_WIDTH 1
689#define MTL_Q_TQOMR_TTC_INDEX 4
690#define MTL_Q_TQOMR_TTC_WIDTH 3
691#define MTL_Q_TQOMR_TXQEN_INDEX 2
692#define MTL_Q_TQOMR_TXQEN_WIDTH 2
693
694/* MTL queue register value */
695#define MTL_RSF_DISABLE 0x00
696#define MTL_RSF_ENABLE 0x01
697#define MTL_TSF_DISABLE 0x00
698#define MTL_TSF_ENABLE 0x01
699
700#define MTL_RX_THRESHOLD_64 0x00
701#define MTL_RX_THRESHOLD_96 0x02
702#define MTL_RX_THRESHOLD_128 0x03
703#define MTL_TX_THRESHOLD_32 0x01
704#define MTL_TX_THRESHOLD_64 0x00
705#define MTL_TX_THRESHOLD_96 0x02
706#define MTL_TX_THRESHOLD_128 0x03
707#define MTL_TX_THRESHOLD_192 0x04
708#define MTL_TX_THRESHOLD_256 0x05
709#define MTL_TX_THRESHOLD_384 0x06
710#define MTL_TX_THRESHOLD_512 0x07
711
712#define MTL_ETSALG_WRR 0x00
713#define MTL_ETSALG_WFQ 0x01
714#define MTL_ETSALG_DWRR 0x02
715#define MTL_RAA_SP 0x00
716#define MTL_RAA_WSP 0x01
717
718#define MTL_Q_DISABLED 0x00
719#define MTL_Q_ENABLED 0x02
720
721
722/* MTL traffic class register offsets
723 * Multiple traffic classes can be active. The first class has registers
724 * that begin at 0x1100. Each subsequent class has registers that
725 * are accessed using an offset of 0x80 from the previous class.
726 */
727#define MTL_TC_BASE MTL_Q_BASE
728#define MTL_TC_INC MTL_Q_INC
729
730#define MTL_TC_ETSCR 0x10
731
732/* MTL traffic class register entry bit positions and sizes */
733#define MTL_TC_ETSCR_TSA_INDEX 0
734#define MTL_TC_ETSCR_TSA_WIDTH 2
735
736/* MTL traffic class register value */
737#define MTL_TSA_SP 0x00
738#define MTL_TSA_ETS 0x02
739
740
741/* PCS MMD select register offset
742 * The MMD select register is used for accessing PCS registers
743 * when the underlying APB3 interface is using indirect addressing.
744 * Indirect addressing requires accessing registers in two phases,
745 * an address phase and a data phase. The address phase requires
746 * writing an address selection value to the MMD select register.
747 */
748#define PCS_MMD_SELECT 0xff
749
750
751/* Descriptor/Packet entry bit positions and sizes */
752#define RX_PACKET_ERRORS_CRC_INDEX 2
753#define RX_PACKET_ERRORS_CRC_WIDTH 1
754#define RX_PACKET_ERRORS_FRAME_INDEX 3
755#define RX_PACKET_ERRORS_FRAME_WIDTH 1
756#define RX_PACKET_ERRORS_LENGTH_INDEX 0
757#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
758#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
759#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
760
761#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
762#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
763#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
764#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
765#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
766#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
767
768#define RX_NORMAL_DESC0_OVT_INDEX 0
769#define RX_NORMAL_DESC0_OVT_WIDTH 16
770#define RX_NORMAL_DESC3_ES_INDEX 15
771#define RX_NORMAL_DESC3_ES_WIDTH 1
772#define RX_NORMAL_DESC3_ETLT_INDEX 16
773#define RX_NORMAL_DESC3_ETLT_WIDTH 4
774#define RX_NORMAL_DESC3_INTE_INDEX 30
775#define RX_NORMAL_DESC3_INTE_WIDTH 1
776#define RX_NORMAL_DESC3_LD_INDEX 28
777#define RX_NORMAL_DESC3_LD_WIDTH 1
778#define RX_NORMAL_DESC3_OWN_INDEX 31
779#define RX_NORMAL_DESC3_OWN_WIDTH 1
780#define RX_NORMAL_DESC3_PL_INDEX 0
781#define RX_NORMAL_DESC3_PL_WIDTH 14
782
783#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
784#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
785#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
786#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
787#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
788#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
789
790#define TX_CONTEXT_DESC2_MSS_INDEX 0
791#define TX_CONTEXT_DESC2_MSS_WIDTH 15
792#define TX_CONTEXT_DESC3_CTXT_INDEX 30
793#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
794#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
795#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
796#define TX_CONTEXT_DESC3_VLTV_INDEX 16
797#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
798#define TX_CONTEXT_DESC3_VT_INDEX 0
799#define TX_CONTEXT_DESC3_VT_WIDTH 16
800
801#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
802#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
803#define TX_NORMAL_DESC2_IC_INDEX 31
804#define TX_NORMAL_DESC2_IC_WIDTH 1
805#define TX_NORMAL_DESC2_VTIR_INDEX 14
806#define TX_NORMAL_DESC2_VTIR_WIDTH 2
807#define TX_NORMAL_DESC3_CIC_INDEX 16
808#define TX_NORMAL_DESC3_CIC_WIDTH 2
809#define TX_NORMAL_DESC3_CPC_INDEX 26
810#define TX_NORMAL_DESC3_CPC_WIDTH 2
811#define TX_NORMAL_DESC3_CTXT_INDEX 30
812#define TX_NORMAL_DESC3_CTXT_WIDTH 1
813#define TX_NORMAL_DESC3_FD_INDEX 29
814#define TX_NORMAL_DESC3_FD_WIDTH 1
815#define TX_NORMAL_DESC3_FL_INDEX 0
816#define TX_NORMAL_DESC3_FL_WIDTH 15
817#define TX_NORMAL_DESC3_LD_INDEX 28
818#define TX_NORMAL_DESC3_LD_WIDTH 1
819#define TX_NORMAL_DESC3_OWN_INDEX 31
820#define TX_NORMAL_DESC3_OWN_WIDTH 1
821#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
822#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
823#define TX_NORMAL_DESC3_TCPPL_INDEX 0
824#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
825#define TX_NORMAL_DESC3_TSE_INDEX 18
826#define TX_NORMAL_DESC3_TSE_WIDTH 1
827
828#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
829
830/* MDIO undefined or vendor-specific registers */
831#ifndef MDIO_AN_COMP_STAT
832#define MDIO_AN_COMP_STAT 0x0030
833#endif
834
835
836/* Bit setting and getting macros
837 * The get macro will extract the current bit field value from within
838 * the variable
839 *
840 * The set macro will clear the current bit field value within the
841 * variable and then set the bit field of the variable to the
842 * specified value
843 */
844#define GET_BITS(_var, _index, _width) \
845 (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
846
847#define SET_BITS(_var, _index, _width, _val) \
848do { \
849 (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
850 (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
851} while (0)
852
853#define GET_BITS_LE(_var, _index, _width) \
854 ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
855
856#define SET_BITS_LE(_var, _index, _width, _val) \
857do { \
858 (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
859 (_var) |= cpu_to_le32((((_val) & \
860 ((0x1 << (_width)) - 1)) << (_index))); \
861} while (0)
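
/* For instance, using the MAC_VR USERVER field defined above
 * (index 16, width 8), the get macro reduces to a shift and mask:
 *
 *   GET_BITS(0x00340000, 16, 8)
 *     = (0x00340000 >> 16) & ((0x1 << 8) - 1)
 *     = 0x34
 */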
862
863
864/* Bit setting and getting macros based on register fields
865 * The get macro uses the bit field definitions formed using the input
866 * names to extract the current bit field value from within the
867 * variable
868 *
869 * The set macro uses the bit field definitions formed using the input
870 * names to set the bit field of the variable to the specified value
871 */
872#define XGMAC_GET_BITS(_var, _prefix, _field) \
873 GET_BITS((_var), \
874 _prefix##_##_field##_INDEX, \
875 _prefix##_##_field##_WIDTH)
876
877#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
878 SET_BITS((_var), \
879 _prefix##_##_field##_INDEX, \
880 _prefix##_##_field##_WIDTH, (_val))
881
882#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
883 GET_BITS_LE((_var), \
884 _prefix##_##_field##_INDEX, \
885 _prefix##_##_field##_WIDTH)
886
887#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
888 SET_BITS_LE((_var), \
889 _prefix##_##_field##_INDEX, \
890 _prefix##_##_field##_WIDTH, (_val))
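
/* The prefix and field arguments are pasted together to form the
 * define names, so, using the MAC_VR definitions above:
 *
 *   XGMAC_GET_BITS(var, MAC_VR, SNPSVER)
 *
 * expands to:
 *
 *   GET_BITS(var, MAC_VR_SNPSVER_INDEX, MAC_VR_SNPSVER_WIDTH)
 */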
891
892
893/* Macros for reading or writing registers
894 * The ioread macros will get bit fields or full values using the
895 * register definitions formed using the input names
896 *
897 * The iowrite macros will set bit fields or full values using the
898 * register definitions formed using the input names
899 */
900#define XGMAC_IOREAD(_pdata, _reg) \
901 ioread32((_pdata)->xgmac_regs + _reg)
902
903#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
904 GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
905 _reg##_##_field##_INDEX, \
906 _reg##_##_field##_WIDTH)
907
908#define XGMAC_IOWRITE(_pdata, _reg, _val) \
909 iowrite32((_val), (_pdata)->xgmac_regs + _reg)
910
911#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
912do { \
913 u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
914 SET_BITS(reg_val, \
915 _reg##_##_field##_INDEX, \
916 _reg##_##_field##_WIDTH, (_val)); \
917 XGMAC_IOWRITE((_pdata), _reg, reg_val); \
918} while (0)
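
/* A typical use reads a single field of a memory-mapped register,
 * e.g. the Synopsys version field of the MAC version register:
 *
 *   ver = XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER);
 *
 * The iowrite bits macro performs a read-modify-write so that only
 * the named field is changed.
 */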
919
920
921/* Macros for reading or writing MTL queue or traffic class registers
922 * Similar to the standard read and write macros except that the
923 * base register value is calculated from the queue or traffic class number
924 */
925#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
926 ioread32((_pdata)->xgmac_regs + \
927 MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
928
929#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
930 GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
931 _reg##_##_field##_INDEX, \
932 _reg##_##_field##_WIDTH)
933
934#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
935 iowrite32((_val), (_pdata)->xgmac_regs + \
936 MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
937
938#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
939do { \
940 u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
941 SET_BITS(reg_val, \
942 _reg##_##_field##_INDEX, \
943 _reg##_##_field##_WIDTH, (_val)); \
944 XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
945} while (0)
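
/* For example, enabling receive store-and-forward mode on queue i
 * (as xgbe_config_rsf_mode() does in xgbe-dev.c) is simply:
 *
 *   XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, MTL_RSF_ENABLE);
 */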
946
947
948/* Macros for reading or writing DMA channel registers
949 * Similar to the standard read and write macros except that the
950 * base register value is obtained from the ring
951 */
952#define XGMAC_DMA_IOREAD(_channel, _reg) \
953 ioread32((_channel)->dma_regs + _reg)
954
955#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
956 GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
957 _reg##_##_field##_INDEX, \
958 _reg##_##_field##_WIDTH)
959
960#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
961 iowrite32((_val), (_channel)->dma_regs + _reg)
962
963#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
964do { \
965 u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
966 SET_BITS(reg_val, \
967 _reg##_##_field##_INDEX, \
968 _reg##_##_field##_WIDTH, (_val)); \
969 XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
970} while (0)
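
/* For example, programming the receive interrupt watchdog timer of
 * a channel (as xgbe_config_rx_coalesce() does in xgbe-dev.c):
 *
 *   XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT, pdata->rx_riwt);
 */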
971
972
973/* Macros for building, reading or writing register values or bits
974 * within the XPCS registers.
975 */
976#define XPCS_IOWRITE(_pdata, _off, _val) \
977 iowrite32(_val, (_pdata)->xpcs_regs + (_off))
978
979#define XPCS_IOREAD(_pdata, _off) \
980 ioread32((_pdata)->xpcs_regs + (_off))
981
982
983/* Macros for building, reading or writing register values or bits
984 * using MDIO. Different from above because of the use of standardized
985 * Linux include values. No shifting is performed with the bit
986 * operations; everything works on mask values.
987 */
988#define XMDIO_READ(_pdata, _mmd, _reg) \
989 ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
990 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
991
992#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
993 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
994
995#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
996 ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
997 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
998
999#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
1000do { \
1001 u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
1002 mmd_val &= ~_mask; \
1003 mmd_val |= (_val); \
1004 XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
1005} while (0)
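
/* Since the masks are applied directly, callers pass the pre-shifted
 * mask values from <linux/mdio.h>. An illustrative link status check:
 *
 *   if (XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_STAT1,
 *                       MDIO_STAT1_LSTATUS))
 *           ... link is up ...
 */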
1006
1007#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 000000000000..6bb76d5c817b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,375 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/debugfs.h>
118#include <linux/module.h>
119#include <linux/slab.h>
120
121#include "xgbe.h"
122#include "xgbe-common.h"
123
124
125static ssize_t xgbe_common_read(char __user *buffer, size_t count,
126 loff_t *ppos, unsigned int value)
127{
128 char *buf;
129 ssize_t len;
130
131 if (*ppos != 0)
132 return 0;
133
134 buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
135 if (!buf)
136 return -ENOMEM;
137
138 if (count < strlen(buf)) {
139 kfree(buf);
140 return -ENOSPC;
141 }
142
143 len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
144 kfree(buf);
145
146 return len;
147}
148
149static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
150 loff_t *ppos, unsigned int *value)
151{
152 char workarea[32];
153 ssize_t len;
154 unsigned int scan_value;
155
156 if (*ppos != 0)
157 return 0;
158
159 if (count >= sizeof(workarea))
160 return -ENOSPC;
161
162 len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
163 buffer, count);
164 if (len < 0)
165 return len;
166
167 workarea[len] = '\0';
168 if (sscanf(workarea, "%x", &scan_value) == 1)
169 *value = scan_value;
170 else
171 return -EIO;
172
173 return len;
174}
175
176static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
177 size_t count, loff_t *ppos)
178{
179 struct xgbe_prv_data *pdata = filp->private_data;
180
181 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
182}
183
184static ssize_t xgmac_reg_addr_write(struct file *filp,
185 const char __user *buffer,
186 size_t count, loff_t *ppos)
187{
188 struct xgbe_prv_data *pdata = filp->private_data;
189
190 return xgbe_common_write(buffer, count, ppos,
191 &pdata->debugfs_xgmac_reg);
192}
193
194static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
195 size_t count, loff_t *ppos)
196{
197 struct xgbe_prv_data *pdata = filp->private_data;
198 unsigned int value;
199
200 value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
201
202 return xgbe_common_read(buffer, count, ppos, value);
203}
204
205static ssize_t xgmac_reg_value_write(struct file *filp,
206 const char __user *buffer,
207 size_t count, loff_t *ppos)
208{
209 struct xgbe_prv_data *pdata = filp->private_data;
210 unsigned int value;
211 ssize_t len;
212
213 len = xgbe_common_write(buffer, count, ppos, &value);
214 if (len < 0)
215 return len;
216
217 XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
218
219 return len;
220}
221
222static const struct file_operations xgmac_reg_addr_fops = {
223 .owner = THIS_MODULE,
224 .open = simple_open,
225 .read = xgmac_reg_addr_read,
226 .write = xgmac_reg_addr_write,
227};
228
229static const struct file_operations xgmac_reg_value_fops = {
230 .owner = THIS_MODULE,
231 .open = simple_open,
232 .read = xgmac_reg_value_read,
233 .write = xgmac_reg_value_write,
234};
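
/* The address and value files work as a pair: userspace first writes
 * a register offset to the address file and then reads or writes the
 * value file to access that register. Illustrative shell usage, with
 * MMC_CR (0x0800) as the target register:
 *
 *   echo 0x0800 > xgmac_register
 *   cat xgmac_register_value
 */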
235
236static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
237 size_t count, loff_t *ppos)
238{
239 struct xgbe_prv_data *pdata = filp->private_data;
240
241 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
242}
243
244static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
245 size_t count, loff_t *ppos)
246{
247 struct xgbe_prv_data *pdata = filp->private_data;
248
249 return xgbe_common_write(buffer, count, ppos,
250 &pdata->debugfs_xpcs_mmd);
251}
252
253static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
254 size_t count, loff_t *ppos)
255{
256 struct xgbe_prv_data *pdata = filp->private_data;
257
258 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
259}
260
261static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
262 size_t count, loff_t *ppos)
263{
264 struct xgbe_prv_data *pdata = filp->private_data;
265
266 return xgbe_common_write(buffer, count, ppos,
267 &pdata->debugfs_xpcs_reg);
268}
269
270static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
271 size_t count, loff_t *ppos)
272{
273 struct xgbe_prv_data *pdata = filp->private_data;
274 unsigned int value;
275
276 value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
277 pdata->debugfs_xpcs_reg);
278
279 return xgbe_common_read(buffer, count, ppos, value);
280}
281
282static ssize_t xpcs_reg_value_write(struct file *filp,
283 const char __user *buffer,
284 size_t count, loff_t *ppos)
285{
286 struct xgbe_prv_data *pdata = filp->private_data;
287 unsigned int value;
288 ssize_t len;
289
290 len = xgbe_common_write(buffer, count, ppos, &value);
291 if (len < 0)
292 return len;
293
294 pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
295 pdata->debugfs_xpcs_reg, value);
296
297 return len;
298}
299
300static const struct file_operations xpcs_mmd_fops = {
301 .owner = THIS_MODULE,
302 .open = simple_open,
303 .read = xpcs_mmd_read,
304 .write = xpcs_mmd_write,
305};
306
307static const struct file_operations xpcs_reg_addr_fops = {
308 .owner = THIS_MODULE,
309 .open = simple_open,
310 .read = xpcs_reg_addr_read,
311 .write = xpcs_reg_addr_write,
312};
313
314static const struct file_operations xpcs_reg_value_fops = {
315 .owner = THIS_MODULE,
316 .open = simple_open,
317 .read = xpcs_reg_value_read,
318 .write = xpcs_reg_value_write,
319};
320
321void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
322{
323 struct dentry *pfile;
324 char *buf;
325
326 /* Set defaults */
327 pdata->debugfs_xgmac_reg = 0;
328 pdata->debugfs_xpcs_mmd = 1;
329 pdata->debugfs_xpcs_reg = 0;
330
331 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
 if (!buf)
 return;
332 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
333 if (pdata->xgbe_debugfs == NULL) {
334 netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
335 return;
336 }
337
338 pfile = debugfs_create_file("xgmac_register", 0600,
339 pdata->xgbe_debugfs, pdata,
340 &xgmac_reg_addr_fops);
341 if (!pfile)
342 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
343
344 pfile = debugfs_create_file("xgmac_register_value", 0600,
345 pdata->xgbe_debugfs, pdata,
346 &xgmac_reg_value_fops);
347 if (!pfile)
348 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
349
350 pfile = debugfs_create_file("xpcs_mmd", 0600,
351 pdata->xgbe_debugfs, pdata,
352 &xpcs_mmd_fops);
353 if (!pfile)
354 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
355
356 pfile = debugfs_create_file("xpcs_register", 0600,
357 pdata->xgbe_debugfs, pdata,
358 &xpcs_reg_addr_fops);
359 if (!pfile)
360 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
361
362 pfile = debugfs_create_file("xpcs_register_value", 0600,
363 pdata->xgbe_debugfs, pdata,
364 &xpcs_reg_value_fops);
365 if (!pfile)
366 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
367
368 kfree(buf);
369}
370
371void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
372{
373 debugfs_remove_recursive(pdata->xgbe_debugfs);
374 pdata->xgbe_debugfs = NULL;
375}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 000000000000..6f1c85956d50
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,556 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include "xgbe.h"
118#include "xgbe-common.h"
119
120
121static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
122
123static void xgbe_free_ring(struct xgbe_prv_data *pdata,
124 struct xgbe_ring *ring)
125{
126 struct xgbe_ring_data *rdata;
127 unsigned int i;
128
129 if (!ring)
130 return;
131
132 if (ring->rdata) {
133 for (i = 0; i < ring->rdesc_count; i++) {
134 rdata = GET_DESC_DATA(ring, i);
135 xgbe_unmap_skb(pdata, rdata);
136 }
137
138 kfree(ring->rdata);
139 ring->rdata = NULL;
140 }
141
142 if (ring->rdesc) {
143 dma_free_coherent(pdata->dev,
144 (sizeof(struct xgbe_ring_desc) *
145 ring->rdesc_count),
146 ring->rdesc, ring->rdesc_dma);
147 ring->rdesc = NULL;
148 }
149}
150
151static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
152{
153 struct xgbe_channel *channel;
154 unsigned int i;
155
156 DBGPR("-->xgbe_free_ring_resources\n");
157
158 channel = pdata->channel;
159 for (i = 0; i < pdata->channel_count; i++, channel++) {
160 xgbe_free_ring(pdata, channel->tx_ring);
161 xgbe_free_ring(pdata, channel->rx_ring);
162 }
163
164 DBGPR("<--xgbe_free_ring_resources\n");
165}
166
167static int xgbe_init_ring(struct xgbe_prv_data *pdata,
168 struct xgbe_ring *ring, unsigned int rdesc_count)
169{
170 DBGPR("-->xgbe_init_ring\n");
171
172 if (!ring)
173 return 0;
174
175 /* Descriptors */
176 ring->rdesc_count = rdesc_count;
177 ring->rdesc = dma_alloc_coherent(pdata->dev,
178 (sizeof(struct xgbe_ring_desc) *
179 rdesc_count), &ring->rdesc_dma,
180 GFP_KERNEL);
181 if (!ring->rdesc)
182 return -ENOMEM;
183
184 /* Descriptor information */
185 ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
186 GFP_KERNEL);
187 if (!ring->rdata)
188 return -ENOMEM;
189
190 DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
191 ring->rdesc, ring->rdesc_dma, ring->rdata);
192
193 DBGPR("<--xgbe_init_ring\n");
194
195 return 0;
196}
197
198static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
199{
200 struct xgbe_channel *channel;
201 unsigned int i;
202 int ret;
203
204 DBGPR("-->xgbe_alloc_ring_resources\n");
205
206 channel = pdata->channel;
207 for (i = 0; i < pdata->channel_count; i++, channel++) {
208 DBGPR(" %s - tx_ring:\n", channel->name);
209 ret = xgbe_init_ring(pdata, channel->tx_ring,
210 pdata->tx_desc_count);
211 if (ret) {
212 netdev_alert(pdata->netdev,
213 "error initializing Tx ring\n");
214 goto err_ring;
215 }
216
217 DBGPR(" %s - rx_ring:\n", channel->name);
218 ret = xgbe_init_ring(pdata, channel->rx_ring,
219 pdata->rx_desc_count);
220 if (ret) {
221 netdev_alert(pdata->netdev,
222 "error initializing Rx ring\n");
223 goto err_ring;
224 }
225 }
226
227 DBGPR("<--xgbe_alloc_ring_resources\n");
228
229 return 0;
230
231err_ring:
232 xgbe_free_ring_resources(pdata);
233
234 return ret;
235}
236
237static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
238{
239 struct xgbe_hw_if *hw_if = &pdata->hw_if;
240 struct xgbe_channel *channel;
241 struct xgbe_ring *ring;
242 struct xgbe_ring_data *rdata;
243 struct xgbe_ring_desc *rdesc;
244 dma_addr_t rdesc_dma;
245 unsigned int i, j;
246
247 DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
248
249 channel = pdata->channel;
250 for (i = 0; i < pdata->channel_count; i++, channel++) {
251 ring = channel->tx_ring;
252 if (!ring)
253 break;
254
255 rdesc = ring->rdesc;
256 rdesc_dma = ring->rdesc_dma;
257
258 for (j = 0; j < ring->rdesc_count; j++) {
259 rdata = GET_DESC_DATA(ring, j);
260
261 rdata->rdesc = rdesc;
262 rdata->rdesc_dma = rdesc_dma;
263
264 rdesc++;
265 rdesc_dma += sizeof(struct xgbe_ring_desc);
266 }
267
268 ring->cur = 0;
269 ring->dirty = 0;
270 ring->tx.queue_stopped = 0;
271
272 hw_if->tx_desc_init(channel);
273 }
274
275 DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
276}
277
278static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
279{
280 struct xgbe_hw_if *hw_if = &pdata->hw_if;
281 struct xgbe_channel *channel;
282 struct xgbe_ring *ring;
283 struct xgbe_ring_desc *rdesc;
284 struct xgbe_ring_data *rdata;
285 dma_addr_t rdesc_dma, skb_dma;
286 struct sk_buff *skb = NULL;
287 unsigned int i, j;
288
289 DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
290
291 channel = pdata->channel;
292 for (i = 0; i < pdata->channel_count; i++, channel++) {
293 ring = channel->rx_ring;
294 if (!ring)
295 break;
296
297 rdesc = ring->rdesc;
298 rdesc_dma = ring->rdesc_dma;
299
300 for (j = 0; j < ring->rdesc_count; j++) {
301 rdata = GET_DESC_DATA(ring, j);
302
303 rdata->rdesc = rdesc;
304 rdata->rdesc_dma = rdesc_dma;
305
306 /* Allocate skb & assign to each rdesc */
307 skb = dev_alloc_skb(pdata->rx_buf_size);
308 if (skb == NULL)
309 break;
310 skb_dma = dma_map_single(pdata->dev, skb->data,
311 pdata->rx_buf_size,
312 DMA_FROM_DEVICE);
313 if (dma_mapping_error(pdata->dev, skb_dma)) {
314 netdev_alert(pdata->netdev,
315 "failed to do the dma map\n");
316 dev_kfree_skb_any(skb);
317 break;
318 }
319 rdata->skb = skb;
320 rdata->skb_dma = skb_dma;
321 rdata->skb_dma_len = pdata->rx_buf_size;
322
323 rdesc++;
324 rdesc_dma += sizeof(struct xgbe_ring_desc);
325 }
326
327 ring->cur = 0;
328 ring->dirty = 0;
329 ring->rx.realloc_index = 0;
330 ring->rx.realloc_threshold = 0;
331
332 hw_if->rx_desc_init(channel);
333 }
334
335 DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
336}
337
338static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
339 struct xgbe_ring_data *rdata)
340{
341 if (rdata->skb_dma) {
342 if (rdata->mapped_as_page) {
343 dma_unmap_page(pdata->dev, rdata->skb_dma,
344 rdata->skb_dma_len, DMA_TO_DEVICE);
345 } else {
346 dma_unmap_single(pdata->dev, rdata->skb_dma,
347 rdata->skb_dma_len, DMA_TO_DEVICE);
348 }
349 rdata->skb_dma = 0;
350 rdata->skb_dma_len = 0;
351 }
352
353 if (rdata->skb) {
354 dev_kfree_skb_any(rdata->skb);
355 rdata->skb = NULL;
356 }
357
358 rdata->tso_header = 0;
359 rdata->len = 0;
360 rdata->interrupt = 0;
361 rdata->mapped_as_page = 0;
362}
363
364static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
365{
366 struct xgbe_prv_data *pdata = channel->pdata;
367 struct xgbe_ring *ring = channel->tx_ring;
368 struct xgbe_ring_data *rdata;
369 struct xgbe_packet_data *packet;
370 struct skb_frag_struct *frag;
371 dma_addr_t skb_dma;
372 unsigned int start_index, cur_index;
373 unsigned int offset, tso, vlan, datalen, len;
374 unsigned int i;
375
376 DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
377
378 offset = 0;
379 start_index = ring->cur;
380 cur_index = ring->cur;
381
382 packet = &ring->packet_data;
383 packet->rdesc_count = 0;
384 packet->length = 0;
385
386 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
387 TSO_ENABLE);
388 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
389 VLAN_CTAG);
390
391 /* Save space for a context descriptor if needed */
392 if ((tso && (packet->mss != ring->tx.cur_mss)) ||
393 (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
394 cur_index++;
395 rdata = GET_DESC_DATA(ring, cur_index);
396
397 if (tso) {
398 DBGPR(" TSO packet\n");
399
400 /* Map the TSO header */
401 skb_dma = dma_map_single(pdata->dev, skb->data,
402 packet->header_len, DMA_TO_DEVICE);
403 if (dma_mapping_error(pdata->dev, skb_dma)) {
404 netdev_alert(pdata->netdev, "dma_map_single failed\n");
405 goto err_out;
406 }
407 rdata->skb_dma = skb_dma;
408 rdata->skb_dma_len = packet->header_len;
409 rdata->tso_header = 1;
410
411 offset = packet->header_len;
412
413 packet->length += packet->header_len;
414
415 cur_index++;
416 rdata = GET_DESC_DATA(ring, cur_index);
417 }
418
419 /* Map the (remainder of the) packet */
420 for (datalen = skb_headlen(skb) - offset; datalen; ) {
421 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
422
423 skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
424 DMA_TO_DEVICE);
425 if (dma_mapping_error(pdata->dev, skb_dma)) {
426 netdev_alert(pdata->netdev, "dma_map_single failed\n");
427 goto err_out;
428 }
429 rdata->skb_dma = skb_dma;
430 rdata->skb_dma_len = len;
431 DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
432 cur_index, skb_dma, len);
433
434 datalen -= len;
435 offset += len;
436
437 packet->length += len;
438
439 cur_index++;
440 rdata = GET_DESC_DATA(ring, cur_index);
441 }
442
443 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
444 DBGPR(" mapping frag %u\n", i);
445
446 frag = &skb_shinfo(skb)->frags[i];
447 offset = 0;
448
449 for (datalen = skb_frag_size(frag); datalen; ) {
450 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
451
452 skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
453 len, DMA_TO_DEVICE);
454 if (dma_mapping_error(pdata->dev, skb_dma)) {
455 netdev_alert(pdata->netdev,
456 "skb_frag_dma_map failed\n");
457 goto err_out;
458 }
459 rdata->skb_dma = skb_dma;
460 rdata->skb_dma_len = len;
461 rdata->mapped_as_page = 1;
462 DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
463 cur_index, skb_dma, len);
464
465 datalen -= len;
466 offset += len;
467
468 packet->length += len;
469
470 cur_index++;
471 rdata = GET_DESC_DATA(ring, cur_index);
472 }
473 }
474
475 /* Save the skb address in the last entry */
476 rdata->skb = skb;
477
478 /* Save the number of descriptor entries used */
479 packet->rdesc_count = cur_index - start_index;
480
481 DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
482
483 return packet->rdesc_count;
484
485err_out:
486 while (start_index < cur_index) {
487 rdata = GET_DESC_DATA(ring, start_index++);
488 xgbe_unmap_skb(pdata, rdata);
489 }
490
491 DBGPR("<--xgbe_map_tx_skb: count=0\n");
492
493 return 0;
494}
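
/* For a TSO skb, the mapping above produces, in order: an optional
 * context descriptor slot (reserved when the MSS or VLAN tag changes),
 * the TSO header buffer, the remaining linear data in TX_MAX_BUF_SIZE
 * chunks, and then the fragment pages; the skb pointer itself is saved
 * in the final entry so it can be freed once the whole chain completes.
 */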
495
496static void xgbe_realloc_skb(struct xgbe_channel *channel)
497{
498 struct xgbe_prv_data *pdata = channel->pdata;
499 struct xgbe_hw_if *hw_if = &pdata->hw_if;
500 struct xgbe_ring *ring = channel->rx_ring;
501 struct xgbe_ring_data *rdata;
502 struct sk_buff *skb = NULL;
503 dma_addr_t skb_dma;
504 int i;
505
506 DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
507 ring->rx.realloc_index);
508
509 for (i = 0; i < ring->dirty; i++) {
510 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
511
512 /* Reset rdata values */
513 xgbe_unmap_skb(pdata, rdata);
514
515 /* Allocate skb & assign to each rdesc */
516 skb = dev_alloc_skb(pdata->rx_buf_size);
517 if (skb == NULL) {
518 netdev_alert(pdata->netdev,
519 "failed to allocate skb\n");
520 break;
521 }
522 skb_dma = dma_map_single(pdata->dev, skb->data,
523 pdata->rx_buf_size, DMA_FROM_DEVICE);
524 if (dma_mapping_error(pdata->dev, skb_dma)) {
525 netdev_alert(pdata->netdev,
526 "failed to do the dma map\n");
527 dev_kfree_skb_any(skb);
528 break;
529 }
530 rdata->skb = skb;
531 rdata->skb_dma = skb_dma;
532 rdata->skb_dma_len = pdata->rx_buf_size;
533
534 hw_if->rx_desc_reset(rdata);
535
536 ring->rx.realloc_index++;
537 }
538 ring->dirty = 0;
539
540 DBGPR("<--xgbe_realloc_skb\n");
541}
542
543void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
544{
545 DBGPR("-->xgbe_init_function_ptrs_desc\n");
546
547 desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
548 desc_if->free_ring_resources = xgbe_free_ring_resources;
549 desc_if->map_tx_skb = xgbe_map_tx_skb;
550 desc_if->realloc_skb = xgbe_realloc_skb;
551 desc_if->unmap_skb = xgbe_unmap_skb;
552 desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
553 desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
554
555 DBGPR("<--xgbe_init_function_ptrs_desc\n");
556}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 000000000000..002293b0819d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,2182 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/phy.h>
118#include <linux/clk.h>
119
120#include "xgbe.h"
121#include "xgbe-common.h"
122
123
124static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
125 unsigned int usec)
126{
127 unsigned long rate;
128 unsigned int ret;
129
130 DBGPR("-->xgbe_usec_to_riwt\n");
131
132 rate = clk_get_rate(pdata->sysclock);
133
134 /*
135 * Convert the input usec value to the watchdog timer value. Each
136 * watchdog timer value is equivalent to 256 clock cycles.
137 * Calculate the required value as:
138 * ( usec * ( system_clock_hz / 10^6 ) ) / 256
139 */
140 ret = (usec * (rate / 1000000)) / 256;
141
142 DBGPR("<--xgbe_usec_to_riwt\n");
143
144 return ret;
145}
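
/* Example conversion, assuming a 250 MHz system clock (the actual
 * rate is whatever clk_get_rate() reports): 100 usec maps to a
 * watchdog value of (100 * (250000000 / 1000000)) / 256 = 97.
 */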
146
147static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
148 unsigned int riwt)
149{
150 unsigned long rate;
151 unsigned int ret;
152
153 DBGPR("-->xgbe_riwt_to_usec\n");
154
155 rate = clk_get_rate(pdata->sysclock);
156
157 /*
158 * Convert the input watchdog timer value to the usec value. Each
159 * watchdog timer value is equivalent to 256 clock cycles.
160 * Calculate the required value as:
161 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
162 */
163 ret = (riwt * 256) / (rate / 1000000);
164
165 DBGPR("<--xgbe_riwt_to_usec\n");
166
167 return ret;
168}
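
/* Worked example for the two conversions above (illustrative; the real
 * rate comes from the platform clock in pdata->sysclock): with a
 * hypothetical 125 MHz system clock, rate / 10^6 = 125, so usec = 100
 * gives riwt = (100 * 125) / 256 = 48 watchdog units, and riwt = 48
 * converts back to (48 * 256) / 125 = 98 usec; integer division makes
 * the round trip lossy.
 */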
169
170static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
171{
172 struct xgbe_channel *channel;
173 unsigned int i;
174
175 channel = pdata->channel;
176 for (i = 0; i < pdata->channel_count; i++, channel++)
177 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
178 pdata->pblx8);
179
180 return 0;
181}
182
183static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
184{
185 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
186}
187
188static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
189{
190 struct xgbe_channel *channel;
191 unsigned int i;
192
193 channel = pdata->channel;
194 for (i = 0; i < pdata->channel_count; i++, channel++) {
195 if (!channel->tx_ring)
196 break;
197
198 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
199 pdata->tx_pbl);
200 }
201
202 return 0;
203}
204
205static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
206{
207 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
208}
209
210static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
211{
212 struct xgbe_channel *channel;
213 unsigned int i;
214
215 channel = pdata->channel;
216 for (i = 0; i < pdata->channel_count; i++, channel++) {
217 if (!channel->rx_ring)
218 break;
219
220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
221 pdata->rx_pbl);
222 }
223
224 return 0;
225}
226
227static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
228{
229 struct xgbe_channel *channel;
230 unsigned int i;
231
232 channel = pdata->channel;
233 for (i = 0; i < pdata->channel_count; i++, channel++) {
234 if (!channel->tx_ring)
235 break;
236
237 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
238 pdata->tx_osp_mode);
239 }
240
241 return 0;
242}
243
244static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
245{
246 unsigned int i;
247
248 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
249 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
250
251 return 0;
252}
253
254static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
255{
256 unsigned int i;
257
258 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
259 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
260
261 return 0;
262}
263
264static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
265 unsigned int val)
266{
267 unsigned int i;
268
269 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
270 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
271
272 return 0;
273}
274
275static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
276 unsigned int val)
277{
278 unsigned int i;
279
280 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
281 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
282
283 return 0;
284}
285
286static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
287{
288 struct xgbe_channel *channel;
289 unsigned int i;
290
291 channel = pdata->channel;
292 for (i = 0; i < pdata->channel_count; i++, channel++) {
293 if (!channel->rx_ring)
294 break;
295
296 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
297 pdata->rx_riwt);
298 }
299
300 return 0;
301}
302
303static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
304{
305 return 0;
306}
307
308static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
309{
310 struct xgbe_channel *channel;
311 unsigned int i;
312
313 channel = pdata->channel;
314 for (i = 0; i < pdata->channel_count; i++, channel++) {
315 if (!channel->rx_ring)
316 break;
317
318 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
319 pdata->rx_buf_size);
320 }
321}
322
323static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
324{
325 struct xgbe_channel *channel;
326 unsigned int i;
327
328 channel = pdata->channel;
329 for (i = 0; i < pdata->channel_count; i++, channel++) {
330 if (!channel->tx_ring)
331 break;
332
333 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
334 }
335}
336
337static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
338{
339 unsigned int max_q_count, q_count;
340 unsigned int reg, reg_val;
341 unsigned int i;
342
343 /* Clear MTL flow control */
344 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
345 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
346
347 /* Clear MAC flow control */
348 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
349 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
350 reg = MAC_Q0TFCR;
351 for (i = 0; i < q_count; i++) {
352 reg_val = XGMAC_IOREAD(pdata, reg);
353 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
354 XGMAC_IOWRITE(pdata, reg, reg_val);
355
356 reg += MAC_QTFCR_INC;
357 }
358
359 return 0;
360}
361
362static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
363{
364 unsigned int max_q_count, q_count;
365 unsigned int reg, reg_val;
366 unsigned int i;
367
368 /* Set MTL flow control */
369 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
370 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
371
372 /* Set MAC flow control */
373 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
374 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
375 reg = MAC_Q0TFCR;
376 for (i = 0; i < q_count; i++) {
377 reg_val = XGMAC_IOREAD(pdata, reg);
378
379 /* Enable transmit flow control */
380 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
381 /* Set pause time */
382 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
383
384 XGMAC_IOWRITE(pdata, reg, reg_val);
385
386 reg += MAC_QTFCR_INC;
387 }
388
389 return 0;
390}
391
392static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
393{
394 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
395
396 return 0;
397}
398
399static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
400{
401 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
402
403 return 0;
404}
405
406static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
407{
408 if (pdata->tx_pause)
409 xgbe_enable_tx_flow_control(pdata);
410 else
411 xgbe_disable_tx_flow_control(pdata);
412
413 return 0;
414}
415
416static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
417{
418 if (pdata->rx_pause)
419 xgbe_enable_rx_flow_control(pdata);
420 else
421 xgbe_disable_rx_flow_control(pdata);
422
423 return 0;
424}
425
426static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
427{
428 xgbe_config_tx_flow_control(pdata);
429 xgbe_config_rx_flow_control(pdata);
430}
431
432static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
433{
434 struct xgbe_channel *channel;
435 unsigned int dma_ch_isr, dma_ch_ier;
436 unsigned int i;
437
438 channel = pdata->channel;
439 for (i = 0; i < pdata->channel_count; i++, channel++) {
440 /* Clear all the interrupts which are set */
441 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
442 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
443
444 /* Clear all interrupt enable bits */
445 dma_ch_ier = 0;
446
447		/* Enable the following interrupts
448 * NIE - Normal Interrupt Summary Enable
449 * AIE - Abnormal Interrupt Summary Enable
450 * FBEE - Fatal Bus Error Enable
451 */
452 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
453 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
454 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
455
456 if (channel->tx_ring) {
457 /* Enable the following Tx interrupts
458 * TIE - Transmit Interrupt Enable (unless polling)
459 */
460 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
461 }
462 if (channel->rx_ring) {
463			/* Enable the following Rx interrupts
464 * RBUE - Receive Buffer Unavailable Enable
465 * RIE - Receive Interrupt Enable
466 */
467 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
468 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
469 }
470
471 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
472 }
473}
474
475static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
476{
477 unsigned int mtl_q_isr;
478 unsigned int q_count, i;
479
480 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
481 for (i = 0; i < q_count; i++) {
482 /* Clear all the interrupts which are set */
483 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
484 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
485
486 /* No MTL interrupts to be enabled */
487		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
488 }
489}
490
491static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
492{
493 /* No MAC interrupts to be enabled */
494 XGMAC_IOWRITE(pdata, MAC_IER, 0);
495
496 /* Enable all counter interrupts */
497 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
498 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
499}
500
501static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
502{
503 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
504
505 return 0;
506}
507
508static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
509{
510 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
511
512 return 0;
513}
514
515static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
516{
517 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
518
519 return 0;
520}
521
522static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
523 unsigned int enable)
524{
525 unsigned int val = enable ? 1 : 0;
526
527 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
528 return 0;
529
530 DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
531 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
532
533 return 0;
534}
535
536static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
537 unsigned int enable)
538{
539 unsigned int val = enable ? 1 : 0;
540
541 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
542 return 0;
543
544 DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
545 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
546
547 return 0;
548}
549
550static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
551 unsigned int am_mode)
552{
553 struct netdev_hw_addr *ha;
554 unsigned int mac_reg;
555 unsigned int mac_addr_hi, mac_addr_lo;
556 u8 *mac_addr;
557 unsigned int i;
558
559 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
560 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
561
562 i = 0;
563 mac_reg = MAC_MACA1HR;
564
565 netdev_for_each_uc_addr(ha, pdata->netdev) {
566 mac_addr_lo = 0;
567 mac_addr_hi = 0;
568 mac_addr = (u8 *)&mac_addr_lo;
569 mac_addr[0] = ha->addr[0];
570 mac_addr[1] = ha->addr[1];
571 mac_addr[2] = ha->addr[2];
572 mac_addr[3] = ha->addr[3];
573 mac_addr = (u8 *)&mac_addr_hi;
574 mac_addr[0] = ha->addr[4];
575 mac_addr[1] = ha->addr[5];
576
577 DBGPR(" adding unicast address %pM at 0x%04x\n",
578 ha->addr, mac_reg);
579
580 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
581
582 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
583 mac_reg += MAC_MACA_INC;
584 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
585 mac_reg += MAC_MACA_INC;
586
587 i++;
588 }
589
590 if (!am_mode) {
591 netdev_for_each_mc_addr(ha, pdata->netdev) {
592 mac_addr_lo = 0;
593 mac_addr_hi = 0;
594 mac_addr = (u8 *)&mac_addr_lo;
595 mac_addr[0] = ha->addr[0];
596 mac_addr[1] = ha->addr[1];
597 mac_addr[2] = ha->addr[2];
598 mac_addr[3] = ha->addr[3];
599 mac_addr = (u8 *)&mac_addr_hi;
600 mac_addr[0] = ha->addr[4];
601 mac_addr[1] = ha->addr[5];
602
603 DBGPR(" adding multicast address %pM at 0x%04x\n",
604 ha->addr, mac_reg);
605
606 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
607
608 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
609 mac_reg += MAC_MACA_INC;
610 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
611 mac_reg += MAC_MACA_INC;
612
613 i++;
614 }
615 }
616
617 /* Clear remaining additional MAC address entries */
618 for (; i < pdata->hw_feat.addn_mac; i++) {
619 XGMAC_IOWRITE(pdata, mac_reg, 0);
620 mac_reg += MAC_MACA_INC;
621 XGMAC_IOWRITE(pdata, mac_reg, 0);
622 mac_reg += MAC_MACA_INC;
623 }
624
625 return 0;
626}
627
628static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
629{
630 unsigned int mac_addr_hi, mac_addr_lo;
631
632 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
633 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
634 (addr[1] << 8) | (addr[0] << 0);
635
636 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
637 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
638
639 return 0;
640}
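
/* Register packing example (illustrative): for address
 * 00:11:22:33:44:55 the code above produces mac_addr_hi = 0x5544
 * (addr[5] in bits 15:8, addr[4] in bits 7:0) and mac_addr_lo =
 * 0x33221100, i.e. the address bytes are stored little-endian across
 * MAC_MACA0LR/MAC_MACA0HR.
 */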
641
642static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
643 int mmd_reg)
644{
645 unsigned int mmd_address;
646 int mmd_data;
647
648 if (mmd_reg & MII_ADDR_C45)
649 mmd_address = mmd_reg & ~MII_ADDR_C45;
650 else
651 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
652
653 /* The PCS registers are accessed using mmio. The underlying APB3
654 * management interface uses indirect addressing to access the MMD
655	 * register sets. This requires accessing the PCS registers in two
656 * phases, an address phase and a data phase.
657 *
658 * The mmio interface is based on 32-bit offsets and values. All
659 * register offsets must therefore be adjusted by left shifting the
660 * offset 2 bits and reading 32 bits of data.
661 */
662 mutex_lock(&pdata->xpcs_mutex);
663 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
664 mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
665 mutex_unlock(&pdata->xpcs_mutex);
666
667 return mmd_data;
668}
669
670static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
671 int mmd_reg, int mmd_data)
672{
673 unsigned int mmd_address;
674
675 if (mmd_reg & MII_ADDR_C45)
676 mmd_address = mmd_reg & ~MII_ADDR_C45;
677 else
678 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
679
680 /* The PCS registers are accessed using mmio. The underlying APB3
681 * management interface uses indirect addressing to access the MMD
682	 * register sets. This requires accessing the PCS registers in two
683 * phases, an address phase and a data phase.
684 *
685 * The mmio interface is based on 32-bit offsets and values. All
686 * register offsets must therefore be adjusted by left shifting the
687	 * offset 2 bits and writing 32 bits of data.
688 */
689 mutex_lock(&pdata->xpcs_mutex);
690 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
691 XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
692 mutex_unlock(&pdata->xpcs_mutex);
693}
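
/* Indirect access example (illustrative): for mmd_address = 0x070096
 * (MMD 7, register 0x96), the address phase writes 0x0700
 * (mmd_address >> 8) to the MMD select register, and the data phase
 * then accesses mmio offset (mmd_address & 0xff) << 2 = 0x258,
 * matching the 32-bit register stride described above.
 */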
694
695static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
696{
697 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
698}
699
700static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
701{
702 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
703
704 return 0;
705}
706
707static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
708{
709 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
710
711 return 0;
712}
713
714static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
715{
716 /* Put the VLAN tag in the Rx descriptor */
717 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
718
719 /* Don't check the VLAN type */
720 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
721
722 /* Check only C-TAG (0x8100) packets */
723 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
724
725 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
726 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
727
728 /* Enable VLAN tag stripping */
729 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
730
731 return 0;
732}
733
734static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
735{
736 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
737
738 return 0;
739}
740
741static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
742{
743 struct xgbe_ring_desc *rdesc = rdata->rdesc;
744
745 /* Reset the Tx descriptor
746 * Set buffer 1 (lo) address to zero
747 * Set buffer 1 (hi) address to zero
748	 *   Reset desc2 control bits (IC, TTSE, B2L & B1L)
749	 *   Reset desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
750 */
751 rdesc->desc0 = 0;
752 rdesc->desc1 = 0;
753 rdesc->desc2 = 0;
754 rdesc->desc3 = 0;
755}
756
757static void xgbe_tx_desc_init(struct xgbe_channel *channel)
758{
759 struct xgbe_ring *ring = channel->tx_ring;
760 struct xgbe_ring_data *rdata;
761 struct xgbe_ring_desc *rdesc;
762 int i;
763 int start_index = ring->cur;
764
765 DBGPR("-->tx_desc_init\n");
766
767	/* Initialize all descriptors */
768 for (i = 0; i < ring->rdesc_count; i++) {
769 rdata = GET_DESC_DATA(ring, i);
770 rdesc = rdata->rdesc;
771
772 /* Initialize Tx descriptor
773 * Set buffer 1 (lo) address to zero
774 * Set buffer 1 (hi) address to zero
775		 *   Reset desc2 control bits (IC, TTSE, B2L & B1L)
776		 *   Reset desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC,
777		 *   etc)
778 */
779 rdesc->desc0 = 0;
780 rdesc->desc1 = 0;
781 rdesc->desc2 = 0;
782 rdesc->desc3 = 0;
783 }
784
785 /* Make sure everything is written to the descriptor(s) before
786 * telling the device about them
787 */
788 wmb();
789
790 /* Update the total number of Tx descriptors */
791 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
792
793 /* Update the starting address of descriptor ring */
794 rdata = GET_DESC_DATA(ring, start_index);
795 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
796 upper_32_bits(rdata->rdesc_dma));
797 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
798 lower_32_bits(rdata->rdesc_dma));
799
800 DBGPR("<--tx_desc_init\n");
801}
802
803static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
804{
805 struct xgbe_ring_desc *rdesc = rdata->rdesc;
806
807 /* Reset the Rx descriptor
808 * Set buffer 1 (lo) address to dma address (lo)
809 * Set buffer 1 (hi) address to dma address (hi)
810 * Set buffer 2 (lo) address to zero
811 * Set buffer 2 (hi) address to zero and set control bits
812 * OWN and INTE
813 */
814 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
815 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
816 rdesc->desc2 = 0;
817
818 rdesc->desc3 = 0;
819 if (rdata->interrupt)
820 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
821
822 /* Since the Rx DMA engine is likely running, make sure everything
823 * is written to the descriptor(s) before setting the OWN bit
824 * for the descriptor
825 */
826 wmb();
827
828 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
829
830 /* Make sure ownership is written to the descriptor */
831 wmb();
832}
833
834static void xgbe_rx_desc_init(struct xgbe_channel *channel)
835{
836 struct xgbe_prv_data *pdata = channel->pdata;
837 struct xgbe_ring *ring = channel->rx_ring;
838 struct xgbe_ring_data *rdata;
839 struct xgbe_ring_desc *rdesc;
840 unsigned int start_index = ring->cur;
841 unsigned int rx_coalesce, rx_frames;
842 unsigned int i;
843
844 DBGPR("-->rx_desc_init\n");
845
846 rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
847 rx_frames = pdata->rx_frames;
848
849 /* Initialize all descriptors */
850 for (i = 0; i < ring->rdesc_count; i++) {
851 rdata = GET_DESC_DATA(ring, i);
852 rdesc = rdata->rdesc;
853
854 /* Initialize Rx descriptor
855 * Set buffer 1 (lo) address to dma address (lo)
856 * Set buffer 1 (hi) address to dma address (hi)
857 * Set buffer 2 (lo) address to zero
858 * Set buffer 2 (hi) address to zero and set control
859		 *   bits OWN and INTE appropriately
860 */
861 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
862 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
863 rdesc->desc2 = 0;
864 rdesc->desc3 = 0;
865 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
866 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
867 rdata->interrupt = 1;
868 if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
869 /* Clear interrupt on completion bit */
870 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
871 0);
872 rdata->interrupt = 0;
873 }
874 }
875
876 /* Make sure everything is written to the descriptors before
877 * telling the device about them
878 */
879 wmb();
880
881 /* Update the total number of Rx descriptors */
882 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
883
884 /* Update the starting address of descriptor ring */
885 rdata = GET_DESC_DATA(ring, start_index);
886 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
887 upper_32_bits(rdata->rdesc_dma));
888 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
889 lower_32_bits(rdata->rdesc_dma));
890
891 /* Update the Rx Descriptor Tail Pointer */
892 rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
893 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
894 lower_32_bits(rdata->rdesc_dma));
895
896 DBGPR("<--rx_desc_init\n");
897}
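
/* Coalescing example (illustrative): with coalescing active and
 * rx_frames = 4, the loop above leaves INTE set only on descriptors
 * 3, 7, 11, ... (those where (i + 1) % rx_frames == 0), so the device
 * interrupts at most once per four completed descriptors; the RIWT
 * watchdog covers a batch that never fills.
 */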
898
899static void xgbe_pre_xmit(struct xgbe_channel *channel)
900{
901 struct xgbe_prv_data *pdata = channel->pdata;
902 struct xgbe_ring *ring = channel->tx_ring;
903 struct xgbe_ring_data *rdata;
904 struct xgbe_ring_desc *rdesc;
905 struct xgbe_packet_data *packet = &ring->packet_data;
906 unsigned int csum, tso, vlan;
907 unsigned int tso_context, vlan_context;
908 unsigned int tx_coalesce, tx_frames;
909 int start_index = ring->cur;
910 int i;
911
912 DBGPR("-->xgbe_pre_xmit\n");
913
914 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
915 CSUM_ENABLE);
916 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
917 TSO_ENABLE);
918 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
919 VLAN_CTAG);
920
921 if (tso && (packet->mss != ring->tx.cur_mss))
922 tso_context = 1;
923 else
924 tso_context = 0;
925
926 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
927 vlan_context = 1;
928 else
929 vlan_context = 0;
930
931 tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
932 tx_frames = pdata->tx_frames;
933 if (tx_coalesce && !channel->tx_timer_active)
934 ring->coalesce_count = 0;
935
936 rdata = GET_DESC_DATA(ring, ring->cur);
937 rdesc = rdata->rdesc;
938
939 /* Create a context descriptor if this is a TSO packet */
940 if (tso_context || vlan_context) {
941 if (tso_context) {
942 DBGPR(" TSO context descriptor, mss=%u\n",
943 packet->mss);
944
945 /* Set the MSS size */
946 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
947 MSS, packet->mss);
948
949 /* Mark it as a CONTEXT descriptor */
950 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
951 CTXT, 1);
952
953 /* Indicate this descriptor contains the MSS */
954 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
955 TCMSSV, 1);
956
957 ring->tx.cur_mss = packet->mss;
958 }
959
960 if (vlan_context) {
961 DBGPR(" VLAN context descriptor, ctag=%u\n",
962 packet->vlan_ctag);
963
964 /* Mark it as a CONTEXT descriptor */
965 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
966 CTXT, 1);
967
968 /* Set the VLAN tag */
969 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
970 VT, packet->vlan_ctag);
971
972 /* Indicate this descriptor contains the VLAN tag */
973 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
974 VLTV, 1);
975
976 ring->tx.cur_vlan_ctag = packet->vlan_ctag;
977 }
978
979 ring->cur++;
980 rdata = GET_DESC_DATA(ring, ring->cur);
981 rdesc = rdata->rdesc;
982 }
983
984 /* Update buffer address (for TSO this is the header) */
985 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
986 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
987
988 /* Update the buffer length */
989 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
990 rdata->skb_dma_len);
991
992 /* VLAN tag insertion check */
993 if (vlan)
994 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
995 TX_NORMAL_DESC2_VLAN_INSERT);
996
997 /* Set IC bit based on Tx coalescing settings */
998 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
999 if (tx_coalesce && (!tx_frames ||
1000 (++ring->coalesce_count % tx_frames)))
1001 /* Clear IC bit */
1002 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
1003
1004 /* Mark it as First Descriptor */
1005 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
1006
1007 /* Mark it as a NORMAL descriptor */
1008 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1009
1010 /* Set OWN bit if not the first descriptor */
1011 if (ring->cur != start_index)
1012 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1013
1014 if (tso) {
1015 /* Enable TSO */
1016 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
1017 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
1018 packet->tcp_payload_len);
1019 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1020 packet->tcp_header_len / 4);
1021 } else {
1022 /* Enable CRC and Pad Insertion */
1023 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1024
1025 /* Enable HW CSUM */
1026 if (csum)
1027 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1028 CIC, 0x3);
1029
1030 /* Set the total length to be transmitted */
1031 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1032 packet->length);
1033 }
1034
1035 for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
1036 ring->cur++;
1037 rdata = GET_DESC_DATA(ring, ring->cur);
1038 rdesc = rdata->rdesc;
1039
1040 /* Update buffer address */
1041 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1042 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1043
1044 /* Update the buffer length */
1045 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1046 rdata->skb_dma_len);
1047
1048 /* Set IC bit based on Tx coalescing settings */
1049 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1050 if (tx_coalesce && (!tx_frames ||
1051 (++ring->coalesce_count % tx_frames)))
1052 /* Clear IC bit */
1053 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
1054
1055 /* Set OWN bit */
1056 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1057
1058 /* Mark it as NORMAL descriptor */
1059 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1060
1061 /* Enable HW CSUM */
1062 if (csum)
1063 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1064 CIC, 0x3);
1065 }
1066
1067 /* Set LAST bit for the last descriptor */
1068 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1069
1070 /* In case the Tx DMA engine is running, make sure everything
1071 * is written to the descriptor(s) before setting the OWN bit
1072 * for the first descriptor
1073 */
1074 wmb();
1075
1076 /* Set OWN bit for the first descriptor */
1077 rdata = GET_DESC_DATA(ring, start_index);
1078 rdesc = rdata->rdesc;
1079 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1080
1081#ifdef XGMAC_ENABLE_TX_DESC_DUMP
1082 xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
1083#endif
1084
1085 /* Make sure ownership is written to the descriptor */
1086 wmb();
1087
1088	/* Issue a poll command to the Tx DMA by writing the address
1089	 * of the next immediate free descriptor */
1090 ring->cur++;
1091 rdata = GET_DESC_DATA(ring, ring->cur);
1092 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
1093 lower_32_bits(rdata->rdesc_dma));
1094
1095 /* Start the Tx coalescing timer */
1096 if (tx_coalesce && !channel->tx_timer_active) {
1097 channel->tx_timer_active = 1;
1098 hrtimer_start(&channel->tx_timer,
1099 ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
1100 HRTIMER_MODE_REL);
1101 }
1102
1103 DBGPR(" %s: descriptors %u to %u written\n",
1104 channel->name, start_index & (ring->rdesc_count - 1),
1105 (ring->cur - 1) & (ring->rdesc_count - 1));
1106
1107 DBGPR("<--xgbe_pre_xmit\n");
1108}
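
/* Tx coalescing note (illustrative): the IC handling above mirrors the
 * Rx side; with tx_frames = 4 the IC bit survives only on every fourth
 * descriptor (when ++ring->coalesce_count % tx_frames == 0), and the
 * hrtimer armed above bounds the completion latency when a batch never
 * fills.
 */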
1109
1110static int xgbe_dev_read(struct xgbe_channel *channel)
1111{
1112 struct xgbe_ring *ring = channel->rx_ring;
1113 struct xgbe_ring_data *rdata;
1114 struct xgbe_ring_desc *rdesc;
1115 struct xgbe_packet_data *packet = &ring->packet_data;
1116 unsigned int err, etlt;
1117
1118 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1119
1120 rdata = GET_DESC_DATA(ring, ring->cur);
1121 rdesc = rdata->rdesc;
1122
1123 /* Check for data availability */
1124 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1125 return 1;
1126
1127#ifdef XGMAC_ENABLE_RX_DESC_DUMP
1128 xgbe_dump_rx_desc(ring, rdesc, ring->cur);
1129#endif
1130
1131 /* Get the packet length */
1132 rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1133
1134 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
1135 /* Not all the data has been transferred for this packet */
1136 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1137 INCOMPLETE, 1);
1138 return 0;
1139 }
1140
1141 /* This is the last of the data for this packet */
1142 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1143 INCOMPLETE, 0);
1144
1145 /* Set checksum done indicator as appropriate */
1146 if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
1147 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1148 CSUM_DONE, 1);
1149
1150 /* Check for errors (only valid in last descriptor) */
1151 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1152 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1153 DBGPR(" err=%u, etlt=%#x\n", err, etlt);
1154
1155	if (!err || !etlt) {
1156 if (etlt == 0x09) {
1157 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1158 VLAN_CTAG, 1);
1159 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1160 RX_NORMAL_DESC0,
1161 OVT);
1162 DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
1163 }
1164 } else {
1165 if ((etlt == 0x05) || (etlt == 0x06))
1166 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1167 CSUM_DONE, 0);
1168 else
1169 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1170 FRAME, 1);
1171 }
1172
1173 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1174 ring->cur & (ring->rdesc_count - 1), ring->cur);
1175
1176 return 0;
1177}
1178
1179static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1180{
1181 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1182 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1183}
1184
1185static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1186{
1187 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1188 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1189}
1190
1191static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
1192 enum xgbe_int_state int_state)
1193{
1194 unsigned int dma_ch_ier;
1195
1196 if (int_state == XGMAC_INT_STATE_SAVE) {
1197 channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1198 channel->saved_ier &= DMA_INTERRUPT_MASK;
1199 } else {
1200 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1201 dma_ch_ier |= channel->saved_ier;
1202 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1203 }
1204}
1205
1206static int xgbe_enable_int(struct xgbe_channel *channel,
1207 enum xgbe_int int_id)
1208{
1209 switch (int_id) {
1210 case XGMAC_INT_DMA_ISR_DC0IS:
1211 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
1212 break;
1213 case XGMAC_INT_DMA_CH_SR_TI:
1214 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
1215 break;
1216 case XGMAC_INT_DMA_CH_SR_TPS:
1217 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
1218 break;
1219 case XGMAC_INT_DMA_CH_SR_TBU:
1220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
1221 break;
1222 case XGMAC_INT_DMA_CH_SR_RI:
1223 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
1224 break;
1225 case XGMAC_INT_DMA_CH_SR_RBU:
1226 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
1227 break;
1228 case XGMAC_INT_DMA_CH_SR_RPS:
1229 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
1230 break;
1231 case XGMAC_INT_DMA_CH_SR_FBE:
1232 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
1233 break;
1234 case XGMAC_INT_DMA_ALL:
1235 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
1236 break;
1237 default:
1238 return -1;
1239 }
1240
1241 return 0;
1242}
1243
1244static int xgbe_disable_int(struct xgbe_channel *channel,
1245 enum xgbe_int int_id)
1246{
1247 unsigned int dma_ch_ier;
1248
1249 switch (int_id) {
1250 case XGMAC_INT_DMA_ISR_DC0IS:
1251 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
1252 break;
1253 case XGMAC_INT_DMA_CH_SR_TI:
1254 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
1255 break;
1256 case XGMAC_INT_DMA_CH_SR_TPS:
1257 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
1258 break;
1259 case XGMAC_INT_DMA_CH_SR_TBU:
1260 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
1261 break;
1262 case XGMAC_INT_DMA_CH_SR_RI:
1263 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
1264 break;
1265 case XGMAC_INT_DMA_CH_SR_RBU:
1266 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
1267 break;
1268 case XGMAC_INT_DMA_CH_SR_RPS:
1269 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
1270 break;
1271 case XGMAC_INT_DMA_CH_SR_FBE:
1272 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
1273 break;
1274 case XGMAC_INT_DMA_ALL:
1275 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
1276
1277 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1278 dma_ch_ier &= ~DMA_INTERRUPT_MASK;
1279 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1280 break;
1281 default:
1282 return -1;
1283 }
1284
1285 return 0;
1286}
1287
1288static int xgbe_exit(struct xgbe_prv_data *pdata)
1289{
1290 unsigned int count = 2000;
1291
1292 DBGPR("-->xgbe_exit\n");
1293
1294 /* Issue a software reset */
1295 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
1296 usleep_range(10, 15);
1297
1298	/* Poll until the software reset completes */
1299 while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
1300 usleep_range(500, 600);
1301
1302 if (!count)
1303 return -EBUSY;
1304
1305 DBGPR("<--xgbe_exit\n");
1306
1307 return 0;
1308}
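
/* Timeout arithmetic (illustrative): 2000 iterations at 500-600 usec
 * per poll bounds the wait for the software reset to self-clear at
 * roughly 1.0-1.2 seconds before -EBUSY is returned.
 */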
1309
1310static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
1311{
1312 unsigned int i, count;
1313
1314 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1315 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
1316
1317	/* Poll until each Tx queue flush completes */
1318 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
1319 count = 2000;
1320 while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
1321 MTL_Q_TQOMR, FTQ))
1322 usleep_range(500, 600);
1323
1324 if (!count)
1325 return -EBUSY;
1326 }
1327
1328 return 0;
1329}
1330
1331static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
1332{
1333 /* Set enhanced addressing mode */
1334 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
1335
1336 /* Set the System Bus mode */
1337 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
1338}
1339
1340static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
1341{
1342 unsigned int arcache, awcache;
1343
1344 arcache = 0;
1345 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
1346 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
1347 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
1348 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
1349 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
1350 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
1351 XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
1352
1353 awcache = 0;
1354 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
1355 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
1356 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
1357 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
1358 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
1359 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
1360 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
1361 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
1362 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
1363}
1364
1365static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
1366{
1367 unsigned int i;
1368
1369 /* Set Tx to weighted round robin scheduling algorithm (when
1370 * traffic class is using ETS algorithm)
1371 */
1372 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
1373
1374 /* Set Tx traffic classes to strict priority algorithm */
1375 for (i = 0; i < XGBE_TC_CNT; i++)
1376 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
1377
1378 /* Set Rx to strict priority algorithm */
1379 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
1380}
1381
1382static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
1383 unsigned char queue_count)
1384{
1385 unsigned int q_fifo_size = 0;
1386 enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1387
1388 /* Calculate Tx/Rx fifo share per queue */
1389 switch (fifo_size) {
1390 case 0:
1391 q_fifo_size = FIFO_SIZE_B(128);
1392 break;
1393 case 1:
1394 q_fifo_size = FIFO_SIZE_B(256);
1395 break;
1396 case 2:
1397 q_fifo_size = FIFO_SIZE_B(512);
1398 break;
1399 case 3:
1400 q_fifo_size = FIFO_SIZE_KB(1);
1401 break;
1402 case 4:
1403 q_fifo_size = FIFO_SIZE_KB(2);
1404 break;
1405 case 5:
1406 q_fifo_size = FIFO_SIZE_KB(4);
1407 break;
1408 case 6:
1409 q_fifo_size = FIFO_SIZE_KB(8);
1410 break;
1411 case 7:
1412 q_fifo_size = FIFO_SIZE_KB(16);
1413 break;
1414 case 8:
1415 q_fifo_size = FIFO_SIZE_KB(32);
1416 break;
1417 case 9:
1418 q_fifo_size = FIFO_SIZE_KB(64);
1419 break;
1420 case 10:
1421 q_fifo_size = FIFO_SIZE_KB(128);
1422 break;
1423 case 11:
1424 q_fifo_size = FIFO_SIZE_KB(256);
1425 break;
1426 }
1427 q_fifo_size = q_fifo_size / queue_count;
1428
1429 /* Set the queue fifo size programmable value */
1430 if (q_fifo_size >= FIFO_SIZE_KB(256))
1431 p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
1432 else if (q_fifo_size >= FIFO_SIZE_KB(128))
1433 p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
1434 else if (q_fifo_size >= FIFO_SIZE_KB(64))
1435 p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
1436 else if (q_fifo_size >= FIFO_SIZE_KB(32))
1437 p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
1438 else if (q_fifo_size >= FIFO_SIZE_KB(16))
1439 p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
1440 else if (q_fifo_size >= FIFO_SIZE_KB(8))
1441 p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
1442 else if (q_fifo_size >= FIFO_SIZE_KB(4))
1443 p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
1444 else if (q_fifo_size >= FIFO_SIZE_KB(2))
1445 p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
1446 else if (q_fifo_size >= FIFO_SIZE_KB(1))
1447 p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
1448 else if (q_fifo_size >= FIFO_SIZE_B(512))
1449 p_fifo = XGMAC_MTL_FIFO_SIZE_512;
1450 else if (q_fifo_size >= FIFO_SIZE_B(256))
1451 p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1452
1453 return p_fifo;
1454}
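
/* Sizing example (illustrative): a hardware fifo_size code of 9 maps
 * to 64 KB; split across 4 queues that is 16 KB per queue, which the
 * ladder above rounds down to XGMAC_MTL_FIFO_SIZE_16K. Judging by the
 * (fifo_size + 1) * 256 computation in the callers below, the enum is
 * assumed here to encode (bytes / 256) - 1.
 */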
1455
1456static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
1457{
1458 enum xgbe_mtl_fifo_size fifo_size;
1459 unsigned int i;
1460
1461 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
1462 pdata->hw_feat.tx_q_cnt);
1463
1464 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1465 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
1466
1467 netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
1468 pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
1469}
1470
1471static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
1472{
1473 enum xgbe_mtl_fifo_size fifo_size;
1474 unsigned int i;
1475
1476 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
1477 pdata->hw_feat.rx_q_cnt);
1478
1479 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
1480 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
1481
1482 netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
1483 pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
1484}
1485
1486static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
1487{
1488 unsigned int i, reg, reg_val;
1489 unsigned int q_count = pdata->hw_feat.rx_q_cnt;
1490
1491 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
1492 reg = MTL_RQDCM0R;
1493 reg_val = 0;
1494 for (i = 0; i < q_count;) {
1495 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
1496
1497 if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
1498 continue;
1499
1500 XGMAC_IOWRITE(pdata, reg, reg_val);
1501
1502 reg += MTL_RQDCM_INC;
1503 reg_val = 0;
1504 }
1505}
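
/* Mapping example (illustrative): with q_count = 6 and four queue
 * fields per register, the loop above writes 0x80808080 to MTL_RQDCM0R
 * (queues 0-3) and 0x00008080 to the next register (queues 4-5); the
 * 0x80 in each byte selects dynamic DMA-channel mapping for that
 * queue, per the comment above.
 */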
1506
1507static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
1512 /* Activate flow control when less than 4k left in fifo */
1513 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
1514
1515 /* De-activate flow control when more than 6k left in fifo */
1516 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
1517 }
1518}
1519
1520static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
1521{
1522 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
1523}
1524
1525static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
1526{
1527 unsigned int val;
1528
1529 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
1530
1531 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1532}
1533
1534static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
1535{
1536 if (pdata->netdev->features & NETIF_F_RXCSUM)
1537 xgbe_enable_rx_csum(pdata);
1538 else
1539 xgbe_disable_rx_csum(pdata);
1540}
1541
1542static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
1543{
1544 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1545 xgbe_enable_rx_vlan_stripping(pdata);
1546 else
1547 xgbe_disable_rx_vlan_stripping(pdata);
1548}
1549
1550static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
1551{
1552 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1553 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
1554
1555 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
1556 stats->txoctetcount_gb +=
1557 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1558
1559 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
1560 stats->txframecount_gb +=
1561 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1562
1563 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
1564 stats->txbroadcastframes_g +=
1565 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1566
1567 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
1568 stats->txmulticastframes_g +=
1569 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1570
1571 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
1572 stats->tx64octets_gb +=
1573 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1574
1575 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
1576 stats->tx65to127octets_gb +=
1577 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1578
1579 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
1580 stats->tx128to255octets_gb +=
1581 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1582
1583 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
1584 stats->tx256to511octets_gb +=
1585 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1586
1587 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
1588 stats->tx512to1023octets_gb +=
1589 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1590
1591 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
1592 stats->tx1024tomaxoctets_gb +=
1593 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1594
1595 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
1596 stats->txunicastframes_gb +=
1597 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1598
1599 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
1600 stats->txmulticastframes_gb +=
1601 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1602
1603 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
1604 stats->txbroadcastframes_g +=
1605 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1606
1607 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
1608 stats->txunderflowerror +=
1609 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1610
1611 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
1612 stats->txoctetcount_g +=
1613 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1614
1615 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
1616 stats->txframecount_g +=
1617 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1618
1619 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
1620 stats->txpauseframes +=
1621 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1622
1623 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
1624 stats->txvlanframes_g +=
1625 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1626}
1627
1628static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
1629{
1630 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1631 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
1632
1633 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
1634 stats->rxframecount_gb +=
1635 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1636
1637 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
1638 stats->rxoctetcount_gb +=
1639 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1640
1641 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
1642 stats->rxoctetcount_g +=
1643 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1644
1645 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
1646 stats->rxbroadcastframes_g +=
1647 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1648
1649 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
1650 stats->rxmulticastframes_g +=
1651 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1652
1653 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
1654 stats->rxcrcerror +=
1655 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1656
1657 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
1658 stats->rxrunterror +=
1659 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1660
1661 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
1662 stats->rxjabbererror +=
1663 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1664
1665 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
1666 stats->rxundersize_g +=
1667 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1668
1669 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
1670 stats->rxoversize_g +=
1671 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1672
1673 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
1674 stats->rx64octets_gb +=
1675 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1676
1677 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
1678 stats->rx65to127octets_gb +=
1679 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1680
1681 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
1682 stats->rx128to255octets_gb +=
1683 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1684
1685 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
1686 stats->rx256to511octets_gb +=
1687 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1688
1689 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
1690 stats->rx512to1023octets_gb +=
1691 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1692
1693 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
1694 stats->rx1024tomaxoctets_gb +=
1695 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1696
1697 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
1698 stats->rxunicastframes_g +=
1699 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1700
1701 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
1702 stats->rxlengtherror +=
1703 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1704
1705 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
1706 stats->rxoutofrangetype +=
1707 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1708
1709 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
1710 stats->rxpauseframes +=
1711 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1712
1713 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
1714 stats->rxfifooverflow +=
1715 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1716
1717 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
1718 stats->rxvlanframes_gb +=
1719 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1720
1721 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
1722 stats->rxwatchdogerror +=
1723 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1724}
1725
1726static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
1727{
1728 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1729
1730 /* Freeze counters */
1731 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
1732
1733 stats->txoctetcount_gb +=
1734 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1735
1736 stats->txframecount_gb +=
1737 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1738
1739 stats->txbroadcastframes_g +=
1740 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1741
1742 stats->txmulticastframes_g +=
1743 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1744
1745 stats->tx64octets_gb +=
1746 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1747
1748 stats->tx65to127octets_gb +=
1749 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1750
1751 stats->tx128to255octets_gb +=
1752 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1753
1754 stats->tx256to511octets_gb +=
1755 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1756
1757 stats->tx512to1023octets_gb +=
1758 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1759
1760 stats->tx1024tomaxoctets_gb +=
1761 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1762
1763 stats->txunicastframes_gb +=
1764 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1765
1766 stats->txmulticastframes_gb +=
1767 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1768
1769 stats->txbroadcastframes_g +=
1770 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1771
1772 stats->txunderflowerror +=
1773 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1774
1775 stats->txoctetcount_g +=
1776 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1777
1778 stats->txframecount_g +=
1779 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1780
1781 stats->txpauseframes +=
1782 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1783
1784 stats->txvlanframes_g +=
1785 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1786
1787 stats->rxframecount_gb +=
1788 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1789
1790 stats->rxoctetcount_gb +=
1791 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1792
1793 stats->rxoctetcount_g +=
1794 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1795
1796 stats->rxbroadcastframes_g +=
1797 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1798
1799 stats->rxmulticastframes_g +=
1800 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1801
1802 stats->rxcrcerror +=
1803 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1804
1805 stats->rxrunterror +=
1806 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1807
1808 stats->rxjabbererror +=
1809 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1810
1811 stats->rxundersize_g +=
1812 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1813
1814 stats->rxoversize_g +=
1815 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1816
1817 stats->rx64octets_gb +=
1818 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1819
1820 stats->rx65to127octets_gb +=
1821 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1822
1823 stats->rx128to255octets_gb +=
1824 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1825
1826 stats->rx256to511octets_gb +=
1827 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1828
1829 stats->rx512to1023octets_gb +=
1830 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1831
1832 stats->rx1024tomaxoctets_gb +=
1833 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1834
1835 stats->rxunicastframes_g +=
1836 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1837
1838 stats->rxlengtherror +=
1839 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1840
1841 stats->rxoutofrangetype +=
1842 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1843
1844 stats->rxpauseframes +=
1845 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1846
1847 stats->rxfifooverflow +=
1848 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1849
1850 stats->rxvlanframes_gb +=
1851 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1852
1853 stats->rxwatchdogerror +=
1854 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1855
1856 /* Un-freeze counters */
1857 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
1858}
1859
1860static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
1861{
1862 /* Set counters to reset on read */
1863 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
1864
1865 /* Reset the counters */
1866 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
1867}
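
/* Counter handling note (illustrative): with ROR set the counters
 * clear on every read, which is why the interrupt handlers above and
 * xgbe_read_mmc_stats() accumulate each register into the software
 * stats with += instead of overwriting; the MCF freeze keeps a bulk
 * read of the whole set consistent.
 */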
1868
1869static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
1870{
1871 struct xgbe_channel *channel;
1872 unsigned int i;
1873
1874 /* Enable each Tx DMA channel */
1875 channel = pdata->channel;
1876 for (i = 0; i < pdata->channel_count; i++, channel++) {
1877 if (!channel->tx_ring)
1878 break;
1879
1880 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
1881 }
1882
1883 /* Enable each Tx queue */
1884 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1885 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
1886 MTL_Q_ENABLED);
1887
1888 /* Enable MAC Tx */
1889 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
1890}
1891
1892static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
1893{
1894 struct xgbe_channel *channel;
1895 unsigned int i;
1896
1897 /* Disable MAC Tx */
1898 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
1899
1900 /* Disable each Tx queue */
1901 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1902 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
1903
1904 /* Disable each Tx DMA channel */
1905 channel = pdata->channel;
1906 for (i = 0; i < pdata->channel_count; i++, channel++) {
1907 if (!channel->tx_ring)
1908 break;
1909
1910 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
1911 }
1912}
1913
1914static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
1915{
1916 struct xgbe_channel *channel;
1917 unsigned int reg_val, i;
1918
1919 /* Enable each Rx DMA channel */
1920 channel = pdata->channel;
1921 for (i = 0; i < pdata->channel_count; i++, channel++) {
1922 if (!channel->rx_ring)
1923 break;
1924
1925 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
1926 }
1927
1928 /* Enable each Rx queue */
1929 reg_val = 0;
1930 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
1931 reg_val |= (0x02 << (i << 1));
1932 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
1933
1934 /* Enable MAC Rx */
1935 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
1936 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
1937 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
1938 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
1939}
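
/* Queue-enable packing example (illustrative): MAC_RQC0R carries a
 * 2-bit field per Rx queue, so with rx_q_cnt = 2 the loop above
 * computes reg_val = 0x02 | (0x02 << 2) = 0x0a; the value 2 in each
 * field is assumed here to mean "enabled", per the surrounding
 * comment.
 */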
1940
1941static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
1942{
1943 struct xgbe_channel *channel;
1944 unsigned int i;
1945
1946 /* Disable MAC Rx */
1947 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
1948 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
1949 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
1950 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
1951
1952 /* Disable each Rx queue */
1953 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
1954
1955 /* Disable each Rx DMA channel */
1956 channel = pdata->channel;
1957 for (i = 0; i < pdata->channel_count; i++, channel++) {
1958 if (!channel->rx_ring)
1959 break;
1960
1961 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
1962 }
1963}
1964
1965static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
1966{
1967 struct xgbe_channel *channel;
1968 unsigned int i;
1969
1970 /* Enable each Tx DMA channel */
1971 channel = pdata->channel;
1972 for (i = 0; i < pdata->channel_count; i++, channel++) {
1973 if (!channel->tx_ring)
1974 break;
1975
1976 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
1977 }
1978
1979 /* Enable MAC Tx */
1980 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
1981}
1982
1983static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
1984{
1985 struct xgbe_channel *channel;
1986 unsigned int i;
1987
1988 /* Disable MAC Tx */
1989 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
1990
1991 /* Disable each Tx DMA channel */
1992 channel = pdata->channel;
1993 for (i = 0; i < pdata->channel_count; i++, channel++) {
1994 if (!channel->tx_ring)
1995 break;
1996
1997 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
1998 }
1999}
2000
2001static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
2002{
2003 struct xgbe_channel *channel;
2004 unsigned int i;
2005
2006 /* Enable each Rx DMA channel */
2007 channel = pdata->channel;
2008 for (i = 0; i < pdata->channel_count; i++, channel++) {
2009 if (!channel->rx_ring)
2010 break;
2011
2012 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
2013 }
2014}
2015
2016static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
2017{
2018 struct xgbe_channel *channel;
2019 unsigned int i;
2020
2021 /* Disable each Rx DMA channel */
2022 channel = pdata->channel;
2023 for (i = 0; i < pdata->channel_count; i++, channel++) {
2024 if (!channel->rx_ring)
2025 break;
2026
2027 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
2028 }
2029}
2030
2031static int xgbe_init(struct xgbe_prv_data *pdata)
2032{
2033 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2034 int ret;
2035
2036 DBGPR("-->xgbe_init\n");
2037
2038 /* Flush Tx queues */
2039 ret = xgbe_flush_tx_queues(pdata);
2040 if (ret)
2041 return ret;
2042
2043 /*
2044 * Initialize DMA related features
2045 */
2046 xgbe_config_dma_bus(pdata);
2047 xgbe_config_dma_cache(pdata);
2048 xgbe_config_osp_mode(pdata);
2049 xgbe_config_pblx8(pdata);
2050 xgbe_config_tx_pbl_val(pdata);
2051 xgbe_config_rx_pbl_val(pdata);
2052 xgbe_config_rx_coalesce(pdata);
2053 xgbe_config_tx_coalesce(pdata);
2054 xgbe_config_rx_buffer_size(pdata);
2055 xgbe_config_tso_mode(pdata);
2056 desc_if->wrapper_tx_desc_init(pdata);
2057 desc_if->wrapper_rx_desc_init(pdata);
2058 xgbe_enable_dma_interrupts(pdata);
2059
2060	/*
2061	 * Initialize MTL-related features
2062	 */
2063 xgbe_config_mtl_mode(pdata);
2064 xgbe_config_rx_queue_mapping(pdata);
2065	/* TODO: Program the priorities mapped to the Selected Traffic
2066	 * Classes in the MTL_TC_Prty_Map0-3 registers */
2067 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
2068 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
2069 xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
2070 xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
2071 xgbe_config_tx_fifo_size(pdata);
2072 xgbe_config_rx_fifo_size(pdata);
2073 xgbe_config_flow_control_threshold(pdata);
2074	/* TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
2075	/* TODO: Error packet and undersized good packet forwarding
2076	 * enable (FEP and FUP)
2077	 */
2078 xgbe_enable_mtl_interrupts(pdata);
2079
2080 /* Transmit Class Weight */
2081 XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
2082
2083	/*
2084	 * Initialize MAC-related features
2085	 */
2086 xgbe_config_mac_address(pdata);
2087 xgbe_config_jumbo_enable(pdata);
2088 xgbe_config_flow_control(pdata);
2089 xgbe_config_checksum_offload(pdata);
2090 xgbe_config_vlan_support(pdata);
2091 xgbe_config_mmc(pdata);
2092 xgbe_enable_mac_interrupts(pdata);
2093
2094 DBGPR("<--xgbe_init\n");
2095
2096 return 0;
2097}
2098
2099void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
2100{
2101	DBGPR("-->xgbe_init_function_ptrs_dev\n");
2102
2103 hw_if->tx_complete = xgbe_tx_complete;
2104
2105 hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
2106 hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
2107 hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
2108 hw_if->set_mac_address = xgbe_set_mac_address;
2109
2110 hw_if->enable_rx_csum = xgbe_enable_rx_csum;
2111 hw_if->disable_rx_csum = xgbe_disable_rx_csum;
2112
2113 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
2114 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
2115
2116 hw_if->read_mmd_regs = xgbe_read_mmd_regs;
2117 hw_if->write_mmd_regs = xgbe_write_mmd_regs;
2118
2119 hw_if->set_gmii_speed = xgbe_set_gmii_speed;
2120 hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
2121 hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
2122
2123 hw_if->enable_tx = xgbe_enable_tx;
2124 hw_if->disable_tx = xgbe_disable_tx;
2125 hw_if->enable_rx = xgbe_enable_rx;
2126 hw_if->disable_rx = xgbe_disable_rx;
2127
2128 hw_if->powerup_tx = xgbe_powerup_tx;
2129 hw_if->powerdown_tx = xgbe_powerdown_tx;
2130 hw_if->powerup_rx = xgbe_powerup_rx;
2131 hw_if->powerdown_rx = xgbe_powerdown_rx;
2132
2133 hw_if->pre_xmit = xgbe_pre_xmit;
2134 hw_if->dev_read = xgbe_dev_read;
2135 hw_if->enable_int = xgbe_enable_int;
2136 hw_if->disable_int = xgbe_disable_int;
2137 hw_if->init = xgbe_init;
2138 hw_if->exit = xgbe_exit;
2139
2140	/* Descriptor-related sequences must be initialized here */
2141 hw_if->tx_desc_init = xgbe_tx_desc_init;
2142 hw_if->rx_desc_init = xgbe_rx_desc_init;
2143 hw_if->tx_desc_reset = xgbe_tx_desc_reset;
2144 hw_if->rx_desc_reset = xgbe_rx_desc_reset;
2145 hw_if->is_last_desc = xgbe_is_last_desc;
2146 hw_if->is_context_desc = xgbe_is_context_desc;
2147
2148	/* For flow control */
2149 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
2150 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
2151
2152 /* For RX coalescing */
2153 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
2154 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
2155 hw_if->usec_to_riwt = xgbe_usec_to_riwt;
2156 hw_if->riwt_to_usec = xgbe_riwt_to_usec;
2157
2158 /* For RX and TX threshold config */
2159 hw_if->config_rx_threshold = xgbe_config_rx_threshold;
2160 hw_if->config_tx_threshold = xgbe_config_tx_threshold;
2161
2162 /* For RX and TX Store and Forward Mode config */
2163 hw_if->config_rsf_mode = xgbe_config_rsf_mode;
2164 hw_if->config_tsf_mode = xgbe_config_tsf_mode;
2165
2166 /* For TX DMA Operating on Second Frame config */
2167 hw_if->config_osp_mode = xgbe_config_osp_mode;
2168
2169 /* For RX and TX PBL config */
2170 hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
2171 hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
2172 hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
2173 hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
2174 hw_if->config_pblx8 = xgbe_config_pblx8;
2175
2176 /* For MMC statistics support */
2177 hw_if->tx_mmc_int = xgbe_tx_mmc_int;
2178 hw_if->rx_mmc_int = xgbe_rx_mmc_int;
2179 hw_if->read_mmc_stats = xgbe_read_mmc_stats;
2180
2181	DBGPR("<--xgbe_init_function_ptrs_dev\n");
2182}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 000000000000..cfe3d93b5f52
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,1351 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/spinlock.h>
118#include <linux/tcp.h>
119#include <linux/if_vlan.h>
120#include <linux/phy.h>
121#include <net/busy_poll.h>
122#include <linux/clk.h>
123#include <linux/if_ether.h>
124
125#include "xgbe.h"
126#include "xgbe-common.h"
127
128
129static int xgbe_poll(struct napi_struct *, int);
130static void xgbe_set_rx_mode(struct net_device *);
131
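/*
 * ring->cur and ring->dirty are free-running unsigned counters, so the
 * subtraction below remains correct across wrap-around. For example,
 * with rdesc_count = 512, cur = 1000 and dirty = 900, 412 descriptors
 * are still available.
 */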
132static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
133{
134 return (ring->rdesc_count - (ring->cur - ring->dirty));
135}
136
137static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
138{
139 unsigned int rx_buf_size;
140
141 if (mtu > XGMAC_JUMBO_PACKET_MTU) {
142 netdev_alert(netdev, "MTU exceeds maximum supported value\n");
143 return -EINVAL;
144 }
145
146 rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
147 if (rx_buf_size < RX_MIN_BUF_SIZE)
148 rx_buf_size = RX_MIN_BUF_SIZE;
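	/* Round up to the next RX_BUF_ALIGN boundary with the usual
	 * (x + align - 1) & ~(align - 1) idiom, which assumes the
	 * alignment is a power of two. A 1500-byte MTU gives
	 * 1500 + 14 + 4 + 4 = 1522 bytes, which would become 1536 if
	 * RX_BUF_ALIGN is 64.
	 */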
149 rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
150
151 return rx_buf_size;
152}
153
154static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
155{
156 struct xgbe_hw_if *hw_if = &pdata->hw_if;
157 struct xgbe_channel *channel;
158 unsigned int i;
159
160 channel = pdata->channel;
161 for (i = 0; i < pdata->channel_count; i++, channel++) {
162 if (channel->tx_ring)
163 hw_if->enable_int(channel,
164 XGMAC_INT_DMA_CH_SR_TI);
165 if (channel->rx_ring)
166 hw_if->enable_int(channel,
167 XGMAC_INT_DMA_CH_SR_RI);
168 }
169}
170
171static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
172{
173 struct xgbe_hw_if *hw_if = &pdata->hw_if;
174 struct xgbe_channel *channel;
175 unsigned int i;
176
177 channel = pdata->channel;
178 for (i = 0; i < pdata->channel_count; i++, channel++) {
179 if (channel->tx_ring)
180 hw_if->disable_int(channel,
181 XGMAC_INT_DMA_CH_SR_TI);
182 if (channel->rx_ring)
183 hw_if->disable_int(channel,
184 XGMAC_INT_DMA_CH_SR_RI);
185 }
186}
187
188static irqreturn_t xgbe_isr(int irq, void *data)
189{
190 struct xgbe_prv_data *pdata = data;
191 struct xgbe_hw_if *hw_if = &pdata->hw_if;
192 struct xgbe_channel *channel;
193 unsigned int dma_isr, dma_ch_isr;
194 unsigned int mac_isr;
195 unsigned int i;
196
197	/* The DMA interrupt status register also reports MAC and MTL
198	 * interrupts, so checking that this register is non-zero is
199	 * enough to decide whether this device raised the interrupt
200	 */
201 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
202 if (!dma_isr)
203 goto isr_done;
204
205 DBGPR("-->xgbe_isr\n");
206
207 DBGPR(" DMA_ISR = %08x\n", dma_isr);
208 DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
209 DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
210
211 for (i = 0; i < pdata->channel_count; i++) {
212 if (!(dma_isr & (1 << i)))
213 continue;
214
215 channel = pdata->channel + i;
216
217 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
218 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
219
220 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
221 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
222 if (napi_schedule_prep(&pdata->napi)) {
223 /* Disable Tx and Rx interrupts */
224 xgbe_disable_rx_tx_ints(pdata);
225
226 /* Turn on polling */
227 __napi_schedule(&pdata->napi);
228 }
229 }
230
231 /* Restart the device on a Fatal Bus Error */
232 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
233 schedule_work(&pdata->restart_work);
234
235 /* Clear all interrupt signals */
236 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
237 }
238
239 if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
240 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
241
242 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
243 hw_if->tx_mmc_int(pdata);
244
245 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
246 hw_if->rx_mmc_int(pdata);
247 }
248
249 DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
250
251 DBGPR("<--xgbe_isr\n");
252
253isr_done:
254 return IRQ_HANDLED;
255}
256
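/*
 * Backstop for coalesced transmit completions: when this timer fires,
 * NAPI is scheduled just as a hardware interrupt would schedule it, so
 * completed descriptors are still reclaimed promptly. tx_timer_active
 * is expected to be set by the transmit path that arms the timer.
 */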
257static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
258{
259 struct xgbe_channel *channel = container_of(timer,
260 struct xgbe_channel,
261 tx_timer);
262 struct xgbe_ring *ring = channel->tx_ring;
263 struct xgbe_prv_data *pdata = channel->pdata;
264 unsigned long flags;
265
266 DBGPR("-->xgbe_tx_timer\n");
267
268 spin_lock_irqsave(&ring->lock, flags);
269
270 if (napi_schedule_prep(&pdata->napi)) {
271 /* Disable Tx and Rx interrupts */
272 xgbe_disable_rx_tx_ints(pdata);
273
274 /* Turn on polling */
275 __napi_schedule(&pdata->napi);
276 }
277
278 channel->tx_timer_active = 0;
279
280 spin_unlock_irqrestore(&ring->lock, flags);
281
282 DBGPR("<--xgbe_tx_timer\n");
283
284 return HRTIMER_NORESTART;
285}
286
287static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
288{
289 struct xgbe_channel *channel;
290 unsigned int i;
291
292 DBGPR("-->xgbe_init_tx_timers\n");
293
294 channel = pdata->channel;
295 for (i = 0; i < pdata->channel_count; i++, channel++) {
296 if (!channel->tx_ring)
297 break;
298
299 DBGPR(" %s adding tx timer\n", channel->name);
300 hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
301 HRTIMER_MODE_REL);
302 channel->tx_timer.function = xgbe_tx_timer;
303 }
304
305 DBGPR("<--xgbe_init_tx_timers\n");
306}
307
308static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
309{
310 struct xgbe_channel *channel;
311 unsigned int i;
312
313 DBGPR("-->xgbe_stop_tx_timers\n");
314
315 channel = pdata->channel;
316 for (i = 0; i < pdata->channel_count; i++, channel++) {
317 if (!channel->tx_ring)
318 break;
319
320 DBGPR(" %s deleting tx timer\n", channel->name);
321 channel->tx_timer_active = 0;
322 hrtimer_cancel(&channel->tx_timer);
323 }
324
325 DBGPR("<--xgbe_stop_tx_timers\n");
326}
327
328void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
329{
330 unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
331 struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
332
333 DBGPR("-->xgbe_get_all_hw_features\n");
334
335 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
336 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
337 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
338
339 memset(hw_feat, 0, sizeof(*hw_feat));
340
341 /* Hardware feature register 0 */
342 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
343 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
344 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
345 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
346 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
347 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
348 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
349 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
350 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
351 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
352 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
353 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
354 ADDMACADRSEL);
355 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
356 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
357
358 /* Hardware feature register 1 */
359 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
360 RXFIFOSIZE);
361 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
362 TXFIFOSIZE);
363 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
364 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
365 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
366 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
367 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
368 HASHTBLSZ);
369 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
370 L3L4FNUM);
371
372 /* Hardware feature register 2 */
373 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
374 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
375 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
376 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
377 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
378 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
379
380	/* The queue and channel counts are zero-based, so increment them
381	 * to get the actual counts
382	 */
383 hw_feat->rx_q_cnt++;
384 hw_feat->tx_q_cnt++;
385 hw_feat->rx_ch_cnt++;
386 hw_feat->tx_ch_cnt++;
387
388 DBGPR("<--xgbe_get_all_hw_features\n");
389}
390
391static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
392{
393 if (add)
394 netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
395 NAPI_POLL_WEIGHT);
396 napi_enable(&pdata->napi);
397}
398
399static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
400{
401 napi_disable(&pdata->napi);
402}
403
404void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
405{
406 struct xgbe_hw_if *hw_if = &pdata->hw_if;
407
408 DBGPR("-->xgbe_init_tx_coalesce\n");
409
410 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
411 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
412
413 hw_if->config_tx_coalesce(pdata);
414
415 DBGPR("<--xgbe_init_tx_coalesce\n");
416}
417
418void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
419{
420 struct xgbe_hw_if *hw_if = &pdata->hw_if;
421
422 DBGPR("-->xgbe_init_rx_coalesce\n");
423
424 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
425 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
426
427 hw_if->config_rx_coalesce(pdata);
428
429 DBGPR("<--xgbe_init_rx_coalesce\n");
430}
431
432static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
433{
434 struct xgbe_desc_if *desc_if = &pdata->desc_if;
435 struct xgbe_channel *channel;
436 struct xgbe_ring *ring;
437 struct xgbe_ring_data *rdata;
438 unsigned int i, j;
439
440 DBGPR("-->xgbe_free_tx_skbuff\n");
441
442 channel = pdata->channel;
443 for (i = 0; i < pdata->channel_count; i++, channel++) {
444 ring = channel->tx_ring;
445 if (!ring)
446 break;
447
448 for (j = 0; j < ring->rdesc_count; j++) {
449 rdata = GET_DESC_DATA(ring, j);
450 desc_if->unmap_skb(pdata, rdata);
451 }
452 }
453
454 DBGPR("<--xgbe_free_tx_skbuff\n");
455}
456
457static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
458{
459 struct xgbe_desc_if *desc_if = &pdata->desc_if;
460 struct xgbe_channel *channel;
461 struct xgbe_ring *ring;
462 struct xgbe_ring_data *rdata;
463 unsigned int i, j;
464
465 DBGPR("-->xgbe_free_rx_skbuff\n");
466
467 channel = pdata->channel;
468 for (i = 0; i < pdata->channel_count; i++, channel++) {
469 ring = channel->rx_ring;
470 if (!ring)
471 break;
472
473 for (j = 0; j < ring->rdesc_count; j++) {
474 rdata = GET_DESC_DATA(ring, j);
475 desc_if->unmap_skb(pdata, rdata);
476 }
477 }
478
479 DBGPR("<--xgbe_free_rx_skbuff\n");
480}
481
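/*
 * The caller argument tells whether a power state change was requested
 * from within the driver (XGMAC_DRIVER_CONTEXT, presumably the
 * suspend/resume path) or on behalf of an ioctl (XGMAC_IOCTL_CONTEXT);
 * only the driver context detaches and later re-attaches the net device.
 */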
482int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
483{
484 struct xgbe_prv_data *pdata = netdev_priv(netdev);
485 struct xgbe_hw_if *hw_if = &pdata->hw_if;
486 unsigned long flags;
487
488 DBGPR("-->xgbe_powerdown\n");
489
490 if (!netif_running(netdev) ||
491 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
492 netdev_alert(netdev, "Device is already powered down\n");
493 DBGPR("<--xgbe_powerdown\n");
494 return -EINVAL;
495 }
496
497 phy_stop(pdata->phydev);
498
499 spin_lock_irqsave(&pdata->lock, flags);
500
501 if (caller == XGMAC_DRIVER_CONTEXT)
502 netif_device_detach(netdev);
503
504 netif_tx_stop_all_queues(netdev);
505 xgbe_napi_disable(pdata);
506
507 /* Powerdown Tx/Rx */
508 hw_if->powerdown_tx(pdata);
509 hw_if->powerdown_rx(pdata);
510
511 pdata->power_down = 1;
512
513 spin_unlock_irqrestore(&pdata->lock, flags);
514
515 DBGPR("<--xgbe_powerdown\n");
516
517 return 0;
518}
519
520int xgbe_powerup(struct net_device *netdev, unsigned int caller)
521{
522 struct xgbe_prv_data *pdata = netdev_priv(netdev);
523 struct xgbe_hw_if *hw_if = &pdata->hw_if;
524 unsigned long flags;
525
526 DBGPR("-->xgbe_powerup\n");
527
528 if (!netif_running(netdev) ||
529 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
530 netdev_alert(netdev, "Device is already powered up\n");
531 DBGPR("<--xgbe_powerup\n");
532 return -EINVAL;
533 }
534
535 spin_lock_irqsave(&pdata->lock, flags);
536
537 pdata->power_down = 0;
538
539 phy_start(pdata->phydev);
540
541 /* Enable Tx/Rx */
542 hw_if->powerup_tx(pdata);
543 hw_if->powerup_rx(pdata);
544
545 if (caller == XGMAC_DRIVER_CONTEXT)
546 netif_device_attach(netdev);
547
548 xgbe_napi_enable(pdata, 0);
549 netif_tx_start_all_queues(netdev);
550
551 spin_unlock_irqrestore(&pdata->lock, flags);
552
553 DBGPR("<--xgbe_powerup\n");
554
555 return 0;
556}
557
558static int xgbe_start(struct xgbe_prv_data *pdata)
559{
560 struct xgbe_hw_if *hw_if = &pdata->hw_if;
561 struct net_device *netdev = pdata->netdev;
562
563 DBGPR("-->xgbe_start\n");
564
565 xgbe_set_rx_mode(netdev);
566
567 hw_if->init(pdata);
568
569 phy_start(pdata->phydev);
570
571 hw_if->enable_tx(pdata);
572 hw_if->enable_rx(pdata);
573
574 xgbe_init_tx_timers(pdata);
575
576 xgbe_napi_enable(pdata, 1);
577 netif_tx_start_all_queues(netdev);
578
579 DBGPR("<--xgbe_start\n");
580
581 return 0;
582}
583
584static void xgbe_stop(struct xgbe_prv_data *pdata)
585{
586 struct xgbe_hw_if *hw_if = &pdata->hw_if;
587 struct net_device *netdev = pdata->netdev;
588
589 DBGPR("-->xgbe_stop\n");
590
591 phy_stop(pdata->phydev);
592
593 netif_tx_stop_all_queues(netdev);
594 xgbe_napi_disable(pdata);
595
596 xgbe_stop_tx_timers(pdata);
597
598 hw_if->disable_tx(pdata);
599 hw_if->disable_rx(pdata);
600
601 DBGPR("<--xgbe_stop\n");
602}
603
604static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
605{
606 struct xgbe_hw_if *hw_if = &pdata->hw_if;
607
608 DBGPR("-->xgbe_restart_dev\n");
609
610 /* If not running, "restart" will happen on open */
611 if (!netif_running(pdata->netdev))
612 return;
613
614 xgbe_stop(pdata);
615 synchronize_irq(pdata->irq_number);
616
617 xgbe_free_tx_skbuff(pdata);
618 xgbe_free_rx_skbuff(pdata);
619
620 /* Issue software reset to device if requested */
621 if (reset)
622 hw_if->exit(pdata);
623
624 xgbe_start(pdata);
625
626 DBGPR("<--xgbe_restart_dev\n");
627}
628
629static void xgbe_restart(struct work_struct *work)
630{
631 struct xgbe_prv_data *pdata = container_of(work,
632 struct xgbe_prv_data,
633 restart_work);
634
635 rtnl_lock();
636
637 xgbe_restart_dev(pdata, 1);
638
639 rtnl_unlock();
640}
641
642static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
643{
644 if (vlan_tx_tag_present(skb))
645 packet->vlan_ctag = vlan_tx_tag_get(skb);
646}
647
648static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
649{
650 int ret;
651
652 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
653 TSO_ENABLE))
654 return 0;
655
656 ret = skb_cow_head(skb, 0);
657 if (ret)
658 return ret;
659
660 packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
661 packet->tcp_header_len = tcp_hdrlen(skb);
662 packet->tcp_payload_len = skb->len - packet->header_len;
663 packet->mss = skb_shinfo(skb)->gso_size;
664 DBGPR(" packet->header_len=%u\n", packet->header_len);
665 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
666 packet->tcp_header_len, packet->tcp_payload_len);
667 DBGPR(" packet->mss=%u\n", packet->mss);
668
669 return 0;
670}
671
672static int xgbe_is_tso(struct sk_buff *skb)
673{
674 if (skb->ip_summed != CHECKSUM_PARTIAL)
675 return 0;
676
677 if (!skb_is_gso(skb))
678 return 0;
679
680 DBGPR(" TSO packet to be processed\n");
681
682 return 1;
683}
684
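/*
 * Estimate how many descriptors a frame will consume before mapping it:
 * an optional context descriptor (new MSS or new VLAN tag), one
 * descriptor for the TSO header, and one per TX_MAX_BUF_SIZE chunk of
 * linear and fragment data. A TSO skb with a changed MSS, a small
 * header and one fragment that fits in a single buffer would count
 * 1 + 1 + 1 + 1 = 4 descriptors, for example.
 */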
685static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
686 struct xgbe_packet_data *packet)
687{
688 struct skb_frag_struct *frag;
689 unsigned int context_desc;
690 unsigned int len;
691 unsigned int i;
692
693 context_desc = 0;
694 packet->rdesc_count = 0;
695
696 if (xgbe_is_tso(skb)) {
697		/* TSO requires an extra descriptor if the MSS changed */
698 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
699 context_desc = 1;
700 packet->rdesc_count++;
701 }
702
703		/* TSO requires an extra descriptor for the TSO header */
704 packet->rdesc_count++;
705
706 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
707 TSO_ENABLE, 1);
708 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
709 CSUM_ENABLE, 1);
710 } else if (skb->ip_summed == CHECKSUM_PARTIAL)
711 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
712 CSUM_ENABLE, 1);
713
714 if (vlan_tx_tag_present(skb)) {
715		/* VLAN requires an extra descriptor if the tag changed */
716 if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
717 /* We can share with the TSO context descriptor */
718 if (!context_desc) {
719 context_desc = 1;
720 packet->rdesc_count++;
721 }
722
723 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
724 VLAN_CTAG, 1);
725 }
726
727 for (len = skb_headlen(skb); len;) {
728 packet->rdesc_count++;
729 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
730 }
731
732 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
733 frag = &skb_shinfo(skb)->frags[i];
734 for (len = skb_frag_size(frag); len; ) {
735 packet->rdesc_count++;
736 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
737 }
738 }
739}
740
741static int xgbe_open(struct net_device *netdev)
742{
743 struct xgbe_prv_data *pdata = netdev_priv(netdev);
744 struct xgbe_hw_if *hw_if = &pdata->hw_if;
745 struct xgbe_desc_if *desc_if = &pdata->desc_if;
746 int ret;
747
748 DBGPR("-->xgbe_open\n");
749
750 /* Enable the clock */
751 ret = clk_prepare_enable(pdata->sysclock);
752 if (ret) {
753 netdev_alert(netdev, "clk_prepare_enable failed\n");
754 return ret;
755 }
756
757 /* Calculate the Rx buffer size before allocating rings */
758 ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
759 if (ret < 0)
760 goto err_clk;
761 pdata->rx_buf_size = ret;
762
763 /* Allocate the ring descriptors and buffers */
764 ret = desc_if->alloc_ring_resources(pdata);
765 if (ret)
766 goto err_clk;
767
768 /* Initialize the device restart work struct */
769 INIT_WORK(&pdata->restart_work, xgbe_restart);
770
771 /* Request interrupts */
772 ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
773 netdev->name, pdata);
774 if (ret) {
775 netdev_alert(netdev, "error requesting irq %d\n",
776 pdata->irq_number);
777 goto err_irq;
778 }
779 pdata->irq_number = netdev->irq;
780
781 ret = xgbe_start(pdata);
782 if (ret)
783 goto err_start;
784
785 DBGPR("<--xgbe_open\n");
786
787 return 0;
788
789err_start:
790 hw_if->exit(pdata);
791
792 devm_free_irq(pdata->dev, pdata->irq_number, pdata);
793 pdata->irq_number = 0;
794
795err_irq:
796 desc_if->free_ring_resources(pdata);
797
798err_clk:
799 clk_disable_unprepare(pdata->sysclock);
800
801 return ret;
802}
803
804static int xgbe_close(struct net_device *netdev)
805{
806 struct xgbe_prv_data *pdata = netdev_priv(netdev);
807 struct xgbe_hw_if *hw_if = &pdata->hw_if;
808 struct xgbe_desc_if *desc_if = &pdata->desc_if;
809
810 DBGPR("-->xgbe_close\n");
811
812 /* Stop the device */
813 xgbe_stop(pdata);
814
815 /* Issue software reset to device */
816 hw_if->exit(pdata);
817
818 /* Free all the ring data */
819 desc_if->free_ring_resources(pdata);
820
821 /* Release the interrupt */
822 if (pdata->irq_number != 0) {
823 devm_free_irq(pdata->dev, pdata->irq_number, pdata);
824 pdata->irq_number = 0;
825 }
826
827 /* Disable the clock */
828 clk_disable_unprepare(pdata->sysclock);
829
830 DBGPR("<--xgbe_close\n");
831
832 return 0;
833}
834
835static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
836{
837 struct xgbe_prv_data *pdata = netdev_priv(netdev);
838 struct xgbe_hw_if *hw_if = &pdata->hw_if;
839 struct xgbe_desc_if *desc_if = &pdata->desc_if;
840 struct xgbe_channel *channel;
841 struct xgbe_ring *ring;
842 struct xgbe_packet_data *packet;
843 unsigned long flags;
844 int ret;
845
846 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
847
848 channel = pdata->channel + skb->queue_mapping;
849 ring = channel->tx_ring;
850 packet = &ring->packet_data;
851
852 ret = NETDEV_TX_OK;
853
854 spin_lock_irqsave(&ring->lock, flags);
855
856 if (skb->len == 0) {
857 netdev_err(netdev, "empty skb received from stack\n");
858 dev_kfree_skb_any(skb);
859 goto tx_netdev_return;
860 }
861
862 /* Calculate preliminary packet info */
863 memset(packet, 0, sizeof(*packet));
864 xgbe_packet_info(ring, skb, packet);
865
866 /* Check that there are enough descriptors available */
867 if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
868 DBGPR(" Tx queue stopped, not enough descriptors available\n");
869 netif_stop_subqueue(netdev, channel->queue_index);
870 ring->tx.queue_stopped = 1;
871 ret = NETDEV_TX_BUSY;
872 goto tx_netdev_return;
873 }
874
875 ret = xgbe_prep_tso(skb, packet);
876 if (ret) {
877 netdev_err(netdev, "error processing TSO packet\n");
878 dev_kfree_skb_any(skb);
879 goto tx_netdev_return;
880 }
881 xgbe_prep_vlan(skb, packet);
882
883 if (!desc_if->map_tx_skb(channel, skb)) {
884 dev_kfree_skb_any(skb);
885 goto tx_netdev_return;
886 }
887
888 /* Configure required descriptor fields for transmission */
889 hw_if->pre_xmit(channel);
890
891#ifdef XGMAC_ENABLE_TX_PKT_DUMP
892 xgbe_print_pkt(netdev, skb, true);
893#endif
894
895tx_netdev_return:
896 spin_unlock_irqrestore(&ring->lock, flags);
897
898 DBGPR("<--xgbe_xmit\n");
899
900 return ret;
901}
902
903static void xgbe_set_rx_mode(struct net_device *netdev)
904{
905 struct xgbe_prv_data *pdata = netdev_priv(netdev);
906 struct xgbe_hw_if *hw_if = &pdata->hw_if;
907 unsigned int pr_mode, am_mode;
908
909 DBGPR("-->xgbe_set_rx_mode\n");
910
911 pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
912 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
913
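	/* Fall back to promiscuous or all-multicast filtering when the
	 * address lists would not fit in the additional MAC address
	 * registers the hardware provides. */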
914 if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
915 pr_mode = 1;
916 if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
917 am_mode = 1;
918 if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
919 pdata->hw_feat.addn_mac)
920 pr_mode = 1;
921
922 hw_if->set_promiscuous_mode(pdata, pr_mode);
923 hw_if->set_all_multicast_mode(pdata, am_mode);
924 if (!pr_mode)
925 hw_if->set_addn_mac_addrs(pdata, am_mode);
926
927 DBGPR("<--xgbe_set_rx_mode\n");
928}
929
930static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
931{
932 struct xgbe_prv_data *pdata = netdev_priv(netdev);
933 struct xgbe_hw_if *hw_if = &pdata->hw_if;
934 struct sockaddr *saddr = addr;
935
936 DBGPR("-->xgbe_set_mac_address\n");
937
938 if (!is_valid_ether_addr(saddr->sa_data))
939 return -EADDRNOTAVAIL;
940
941 memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
942
943 hw_if->set_mac_address(pdata, netdev->dev_addr);
944
945 DBGPR("<--xgbe_set_mac_address\n");
946
947 return 0;
948}
949
950static int xgbe_change_mtu(struct net_device *netdev, int mtu)
951{
952 struct xgbe_prv_data *pdata = netdev_priv(netdev);
953 int ret;
954
955 DBGPR("-->xgbe_change_mtu\n");
956
957 ret = xgbe_calc_rx_buf_size(netdev, mtu);
958 if (ret < 0)
959 return ret;
960
961 pdata->rx_buf_size = ret;
962 netdev->mtu = mtu;
963
964 xgbe_restart_dev(pdata, 0);
965
966 DBGPR("<--xgbe_change_mtu\n");
967
968 return 0;
969}
970
971static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
972 struct rtnl_link_stats64 *s)
973{
974 struct xgbe_prv_data *pdata = netdev_priv(netdev);
975 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
976
977 DBGPR("-->%s\n", __func__);
978
979 pdata->hw_if.read_mmc_stats(pdata);
980
981 s->rx_packets = pstats->rxframecount_gb;
982 s->rx_bytes = pstats->rxoctetcount_gb;
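	/* The good-or-bad frame count minus the good unicast, multicast
	 * and broadcast counts leaves the frames received in error. */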
983 s->rx_errors = pstats->rxframecount_gb -
984 pstats->rxbroadcastframes_g -
985 pstats->rxmulticastframes_g -
986 pstats->rxunicastframes_g;
987 s->multicast = pstats->rxmulticastframes_g;
988 s->rx_length_errors = pstats->rxlengtherror;
989 s->rx_crc_errors = pstats->rxcrcerror;
990 s->rx_fifo_errors = pstats->rxfifooverflow;
991
992 s->tx_packets = pstats->txframecount_gb;
993 s->tx_bytes = pstats->txoctetcount_gb;
994 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
995 s->tx_dropped = netdev->stats.tx_dropped;
996
997 DBGPR("<--%s\n", __func__);
998
999 return s;
1000}
1001
1002#ifdef CONFIG_NET_POLL_CONTROLLER
1003static void xgbe_poll_controller(struct net_device *netdev)
1004{
1005 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1006
1007 DBGPR("-->xgbe_poll_controller\n");
1008
1009 disable_irq(pdata->irq_number);
1010
1011 xgbe_isr(pdata->irq_number, pdata);
1012
1013 enable_irq(pdata->irq_number);
1014
1015 DBGPR("<--xgbe_poll_controller\n");
1016}
1017#endif /* End CONFIG_NET_POLL_CONTROLLER */
1018
1019static int xgbe_set_features(struct net_device *netdev,
1020 netdev_features_t features)
1021{
1022 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1023 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1024 unsigned int rxcsum_enabled, rxvlan_enabled;
1025
1026 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
1027 rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
1028
1029 if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
1030 hw_if->enable_rx_csum(pdata);
1031 netdev_alert(netdev, "state change - rxcsum enabled\n");
1032 } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
1033 hw_if->disable_rx_csum(pdata);
1034 netdev_alert(netdev, "state change - rxcsum disabled\n");
1035 }
1036
1037 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
1038 hw_if->enable_rx_vlan_stripping(pdata);
1039 netdev_alert(netdev, "state change - rxvlan enabled\n");
1040 } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
1041 hw_if->disable_rx_vlan_stripping(pdata);
1042 netdev_alert(netdev, "state change - rxvlan disabled\n");
1043 }
1044
1045 pdata->netdev_features = features;
1046
1047 DBGPR("<--xgbe_set_features\n");
1048
1049 return 0;
1050}
1051
1052static const struct net_device_ops xgbe_netdev_ops = {
1053 .ndo_open = xgbe_open,
1054 .ndo_stop = xgbe_close,
1055 .ndo_start_xmit = xgbe_xmit,
1056 .ndo_set_rx_mode = xgbe_set_rx_mode,
1057 .ndo_set_mac_address = xgbe_set_mac_address,
1058 .ndo_validate_addr = eth_validate_addr,
1059 .ndo_change_mtu = xgbe_change_mtu,
1060 .ndo_get_stats64 = xgbe_get_stats64,
1061#ifdef CONFIG_NET_POLL_CONTROLLER
1062 .ndo_poll_controller = xgbe_poll_controller,
1063#endif
1064 .ndo_set_features = xgbe_set_features,
1065};
1066
1067struct net_device_ops *xgbe_get_netdev_ops(void)
1068{
1069 return (struct net_device_ops *)&xgbe_netdev_ops;
1070}
1071
1072static int xgbe_tx_poll(struct xgbe_channel *channel)
1073{
1074 struct xgbe_prv_data *pdata = channel->pdata;
1075 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1076 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1077 struct xgbe_ring *ring = channel->tx_ring;
1078 struct xgbe_ring_data *rdata;
1079 struct xgbe_ring_desc *rdesc;
1080 struct net_device *netdev = pdata->netdev;
1081 unsigned long flags;
1082 int processed = 0;
1083
1084 DBGPR("-->xgbe_tx_poll\n");
1085
1086 /* Nothing to do if there isn't a Tx ring for this channel */
1087 if (!ring)
1088 return 0;
1089
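	/* Reclaim completed descriptors between dirty and cur, capped at
	 * TX_DESC_MAX_PROC per pass so one busy ring cannot monopolize
	 * the NAPI poll. */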
1090 spin_lock_irqsave(&ring->lock, flags);
1091
1092 while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
1093 rdata = GET_DESC_DATA(ring, ring->dirty);
1094 rdesc = rdata->rdesc;
1095
1096 if (!hw_if->tx_complete(rdesc))
1097 break;
1098
1099#ifdef XGMAC_ENABLE_TX_DESC_DUMP
1100 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
1101#endif
1102
1103 /* Free the SKB and reset the descriptor for re-use */
1104 desc_if->unmap_skb(pdata, rdata);
1105 hw_if->tx_desc_reset(rdata);
1106
1107 processed++;
1108 ring->dirty++;
1109 }
1110
1111 if ((ring->tx.queue_stopped == 1) &&
1112 (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
1113 ring->tx.queue_stopped = 0;
1114 netif_wake_subqueue(netdev, channel->queue_index);
1115 }
1116
1117 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1118
1119 spin_unlock_irqrestore(&ring->lock, flags);
1120
1121 return processed;
1122}
1123
1124static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1125{
1126 struct xgbe_prv_data *pdata = channel->pdata;
1127 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1128 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1129 struct xgbe_ring *ring = channel->rx_ring;
1130 struct xgbe_ring_data *rdata;
1131 struct xgbe_packet_data *packet;
1132 struct net_device *netdev = pdata->netdev;
1133 struct sk_buff *skb;
1134 unsigned int incomplete, error;
1135 unsigned int cur_len, put_len, max_len;
1136 int received = 0;
1137
1138 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1139
1140 /* Nothing to do if there isn't a Rx ring for this channel */
1141 if (!ring)
1142 return 0;
1143
1144 packet = &ring->packet_data;
1145 while (received < budget) {
1146 DBGPR(" cur = %d\n", ring->cur);
1147
1148 /* Clear the packet data information */
1149 memset(packet, 0, sizeof(*packet));
1150 skb = NULL;
1151 error = 0;
1152 cur_len = 0;
1153
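		/* A frame larger than one receive buffer arrives as a chain
		 * of descriptors flagged INCOMPLETE; each pass through
		 * read_again appends one buffer's worth of data until the
		 * frame completes. */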
1154read_again:
1155 rdata = GET_DESC_DATA(ring, ring->cur);
1156
1157 if (hw_if->dev_read(channel))
1158 break;
1159
1160 received++;
1161 ring->cur++;
1162 ring->dirty++;
1163
1164 dma_unmap_single(pdata->dev, rdata->skb_dma,
1165 rdata->skb_dma_len, DMA_FROM_DEVICE);
1166 rdata->skb_dma = 0;
1167
1168 incomplete = XGMAC_GET_BITS(packet->attributes,
1169 RX_PACKET_ATTRIBUTES,
1170 INCOMPLETE);
1171
1172 /* Earlier error, just drain the remaining data */
1173 if (incomplete && error)
1174 goto read_again;
1175
1176 if (error || packet->errors) {
1177 if (packet->errors)
1178 DBGPR("Error in received packet\n");
1179 dev_kfree_skb(skb);
1180 continue;
1181 }
1182
1183 put_len = rdata->len - cur_len;
1184 if (skb) {
1185 if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
1186 DBGPR("pskb_expand_head error\n");
1187 if (incomplete) {
1188 error = 1;
1189 goto read_again;
1190 }
1191
1192 dev_kfree_skb(skb);
1193 continue;
1194 }
1195 memcpy(skb_tail_pointer(skb), rdata->skb->data,
1196 put_len);
1197 } else {
1198 skb = rdata->skb;
1199 rdata->skb = NULL;
1200 }
1201 skb_put(skb, put_len);
1202 cur_len += put_len;
1203
1204 if (incomplete)
1205 goto read_again;
1206
1207 /* Be sure we don't exceed the configured MTU */
1208 max_len = netdev->mtu + ETH_HLEN;
1209 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1210 (skb->protocol == htons(ETH_P_8021Q)))
1211 max_len += VLAN_HLEN;
1212
1213 if (skb->len > max_len) {
1214 DBGPR("packet length exceeds configured MTU\n");
1215 dev_kfree_skb(skb);
1216 continue;
1217 }
1218
1219#ifdef XGMAC_ENABLE_RX_PKT_DUMP
1220 xgbe_print_pkt(netdev, skb, false);
1221#endif
1222
1223 skb_checksum_none_assert(skb);
1224 if (XGMAC_GET_BITS(packet->attributes,
1225 RX_PACKET_ATTRIBUTES, CSUM_DONE))
1226 skb->ip_summed = CHECKSUM_UNNECESSARY;
1227
1228 if (XGMAC_GET_BITS(packet->attributes,
1229 RX_PACKET_ATTRIBUTES, VLAN_CTAG))
1230 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1231 packet->vlan_ctag);
1232
1233 skb->dev = netdev;
1234 skb->protocol = eth_type_trans(skb, netdev);
1235 skb_record_rx_queue(skb, channel->queue_index);
1236 skb_mark_napi_id(skb, &pdata->napi);
1237
1238 netdev->last_rx = jiffies;
1239 napi_gro_receive(&pdata->napi, skb);
1240 }
1241
1242 if (received) {
1243 desc_if->realloc_skb(channel);
1244
1245		/* Update the Rx Tail Pointer Register with the address of
1246		 * the last cleaned entry */
1247 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
1248 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1249 lower_32_bits(rdata->rdesc_dma));
1250 }
1251
1252 DBGPR("<--xgbe_rx_poll: received = %d\n", received);
1253
1254 return received;
1255}
1256
1257static int xgbe_poll(struct napi_struct *napi, int budget)
1258{
1259 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
1260 napi);
1261 struct xgbe_channel *channel;
1262 int processed;
1263 unsigned int i;
1264
1265 DBGPR("-->xgbe_poll: budget=%d\n", budget);
1266
1267 /* Cleanup Tx ring first */
1268 channel = pdata->channel;
1269 for (i = 0; i < pdata->channel_count; i++, channel++)
1270 xgbe_tx_poll(channel);
1271
1272 /* Process Rx ring next */
1273 processed = 0;
1274 channel = pdata->channel;
1275 for (i = 0; i < pdata->channel_count; i++, channel++)
1276 processed += xgbe_rx_poll(channel, budget - processed);
1277
1278 /* If we processed everything, we are done */
1279 if (processed < budget) {
1280 /* Turn off polling */
1281 napi_complete(napi);
1282
1283 /* Enable Tx and Rx interrupts */
1284 xgbe_enable_rx_tx_ints(pdata);
1285 }
1286
1287 DBGPR("<--xgbe_poll: received = %d\n", processed);
1288
1289 return processed;
1290}
1291
1292void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
1293 unsigned int count, unsigned int flag)
1294{
1295 struct xgbe_ring_data *rdata;
1296 struct xgbe_ring_desc *rdesc;
1297
1298 while (count--) {
1299 rdata = GET_DESC_DATA(ring, idx);
1300 rdesc = rdata->rdesc;
1301 DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
1302 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
1303 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
1304 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
1305 idx++;
1306 }
1307}
1308
1309void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
1310 unsigned int idx)
1311{
1312 DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
1313 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
1314 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
1315}
1316
1317void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
1318{
1319 struct ethhdr *eth = (struct ethhdr *)skb->data;
1320 unsigned char *buf = skb->data;
1321 unsigned char buffer[128];
1322 unsigned int i, j;
1323
1324 netdev_alert(netdev, "\n************** SKB dump ****************\n");
1325
1326 netdev_alert(netdev, "%s packet of %d bytes\n",
1327 (tx_rx ? "TX" : "RX"), skb->len);
1328
1329 netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
1330 netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
1331 netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
1332
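	/* Hex-dump the payload 32 bytes per line, inserting extra spacing
	 * every 4 and 16 bytes for readability. */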
1333 for (i = 0, j = 0; i < skb->len;) {
1334 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
1335 buf[i++]);
1336
1337 if ((i % 32) == 0) {
1338 netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
1339 j = 0;
1340 } else if ((i % 16) == 0) {
1341 buffer[j++] = ' ';
1342 buffer[j++] = ' ';
1343 } else if ((i % 4) == 0) {
1344 buffer[j++] = ' ';
1345 }
1346 }
1347 if (i % 32)
1348 netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
1349
1350 netdev_alert(netdev, "\n************** SKB dump ****************\n");
1351}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 000000000000..8909f2b51af1
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,510 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/spinlock.h>
118#include <linux/phy.h>
119
120#include "xgbe.h"
121#include "xgbe-common.h"
122
123
124struct xgbe_stats {
125 char stat_string[ETH_GSTRING_LEN];
126 int stat_size;
127 int stat_offset;
128};
129
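/* Describe one MMC counter for ethtool: its display name, the size of the
 * underlying field and its byte offset within struct xgbe_prv_data.
 */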
130#define XGMAC_MMC_STAT(_string, _var) \
131 { _string, \
132 FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
133 offsetof(struct xgbe_prv_data, mmc_stats._var), \
134 }
135
136static const struct xgbe_stats xgbe_gstring_stats[] = {
137 XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
138 XGMAC_MMC_STAT("tx_packets", txframecount_gb),
139 XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
140 XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
141 XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
142 XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
143 XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
144 XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
145 XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
146 XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
147 XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
148 XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
149 XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
150 XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
151
152 XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
153 XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
154 XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
155 XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
156 XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
157 XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
158 XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
159 XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
160 XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
161 XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
162 XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
163 XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
164 XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
165 XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
166 XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
167 XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
168 XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
169 XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
170 XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
171 XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
172 XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
173 XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
174};
175#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
176
177static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
178{
179 int i;
180
181 DBGPR("-->%s\n", __func__);
182
183 switch (stringset) {
184 case ETH_SS_STATS:
185 for (i = 0; i < XGBE_STATS_COUNT; i++) {
186 memcpy(data, xgbe_gstring_stats[i].stat_string,
187 ETH_GSTRING_LEN);
188 data += ETH_GSTRING_LEN;
189 }
190 break;
191 }
192
193 DBGPR("<--%s\n", __func__);
194}
195
196static void xgbe_get_ethtool_stats(struct net_device *netdev,
197 struct ethtool_stats *stats, u64 *data)
198{
199 struct xgbe_prv_data *pdata = netdev_priv(netdev);
200 u8 *stat;
201 int i;
202
203 DBGPR("-->%s\n", __func__);
204
205 pdata->hw_if.read_mmc_stats(pdata);
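	/* Every exported stat is stored as a u64 inside mmc_stats, so it
	 * can be recovered from its byte offset within the private data. */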
206 for (i = 0; i < XGBE_STATS_COUNT; i++) {
207 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
208 *data++ = *(u64 *)stat;
209 }
210
211 DBGPR("<--%s\n", __func__);
212}
213
214static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
215{
216 int ret;
217
218 DBGPR("-->%s\n", __func__);
219
220 switch (stringset) {
221 case ETH_SS_STATS:
222 ret = XGBE_STATS_COUNT;
223 break;
224
225 default:
226 ret = -EOPNOTSUPP;
227 }
228
229 DBGPR("<--%s\n", __func__);
230
231 return ret;
232}
233
234static void xgbe_get_pauseparam(struct net_device *netdev,
235 struct ethtool_pauseparam *pause)
236{
237 struct xgbe_prv_data *pdata = netdev_priv(netdev);
238
239 DBGPR("-->xgbe_get_pauseparam\n");
240
241 pause->autoneg = pdata->pause_autoneg;
242 pause->tx_pause = pdata->tx_pause;
243 pause->rx_pause = pdata->rx_pause;
244
245 DBGPR("<--xgbe_get_pauseparam\n");
246}
247
248static int xgbe_set_pauseparam(struct net_device *netdev,
249 struct ethtool_pauseparam *pause)
250{
251 struct xgbe_prv_data *pdata = netdev_priv(netdev);
252 struct phy_device *phydev = pdata->phydev;
253 int ret = 0;
254
255 DBGPR("-->xgbe_set_pauseparam\n");
256
257 DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
258 pause->autoneg, pause->tx_pause, pause->rx_pause);
259
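	/* With autoneg enabled, the final pause configuration is resolved
	 * by link negotiation through the advertised Pause/Asym_Pause
	 * bits; the manual tx_pause/rx_pause values apply only when
	 * autoneg is disabled.
	 */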
260 pdata->pause_autoneg = pause->autoneg;
261 if (pause->autoneg) {
262 phydev->advertising |= ADVERTISED_Pause;
263 phydev->advertising |= ADVERTISED_Asym_Pause;
264
265 } else {
266 phydev->advertising &= ~ADVERTISED_Pause;
267 phydev->advertising &= ~ADVERTISED_Asym_Pause;
268
269 pdata->tx_pause = pause->tx_pause;
270 pdata->rx_pause = pause->rx_pause;
271 }
272
273 if (netif_running(netdev))
274 ret = phy_start_aneg(phydev);
275
276 DBGPR("<--xgbe_set_pauseparam\n");
277
278 return ret;
279}
280
281static int xgbe_get_settings(struct net_device *netdev,
282 struct ethtool_cmd *cmd)
283{
284 struct xgbe_prv_data *pdata = netdev_priv(netdev);
285 int ret;
286
287 DBGPR("-->xgbe_get_settings\n");
288
289 if (!pdata->phydev)
290 return -ENODEV;
291
292 spin_lock_irq(&pdata->lock);
293
294 ret = phy_ethtool_gset(pdata->phydev, cmd);
295 cmd->transceiver = XCVR_EXTERNAL;
296
297 spin_unlock_irq(&pdata->lock);
298
299 DBGPR("<--xgbe_get_settings\n");
300
301 return ret;
302}
303
304static int xgbe_set_settings(struct net_device *netdev,
305 struct ethtool_cmd *cmd)
306{
307 struct xgbe_prv_data *pdata = netdev_priv(netdev);
308 struct phy_device *phydev = pdata->phydev;
309 u32 speed;
310 int ret;
311
312 DBGPR("-->xgbe_set_settings\n");
313
314 if (!pdata->phydev)
315 return -ENODEV;
316
317 spin_lock_irq(&pdata->lock);
318
319 speed = ethtool_cmd_speed(cmd);
320
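	/* Only autoneg, or a forced speed of 1Gbps or 10Gbps at full
	 * duplex, is accepted; any other combination is rejected with
	 * -EINVAL below.
	 */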
321 ret = -EINVAL;
322 if (cmd->phy_address != phydev->addr)
323 goto unlock;
324
325 if ((cmd->autoneg != AUTONEG_ENABLE) &&
326 (cmd->autoneg != AUTONEG_DISABLE))
327 goto unlock;
328
329 if ((cmd->autoneg == AUTONEG_DISABLE) &&
330 (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
331 (cmd->duplex != DUPLEX_FULL)))
332 goto unlock;
333
334 if (cmd->autoneg == AUTONEG_ENABLE) {
335 /* Clear settings needed to force speeds */
336 phydev->supported &= ~SUPPORTED_1000baseT_Full;
337 phydev->supported &= ~SUPPORTED_10000baseT_Full;
338 } else {
339 /* Add settings needed to force speed */
340 phydev->supported |= SUPPORTED_1000baseT_Full;
341 phydev->supported |= SUPPORTED_10000baseT_Full;
342 }
343
344 cmd->advertising &= phydev->supported;
345 if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
346 goto unlock;
347
348 ret = 0;
349 phydev->autoneg = cmd->autoneg;
350 phydev->speed = speed;
351 phydev->duplex = cmd->duplex;
352 phydev->advertising = cmd->advertising;
353
354 if (cmd->autoneg == AUTONEG_ENABLE)
355 phydev->advertising |= ADVERTISED_Autoneg;
356 else
357 phydev->advertising &= ~ADVERTISED_Autoneg;
358
359 if (netif_running(netdev))
360 ret = phy_start_aneg(phydev);
361
362unlock:
363 spin_unlock_irq(&pdata->lock);
364
365 DBGPR("<--xgbe_set_settings\n");
366
367 return ret;
368}
369
370static void xgbe_get_drvinfo(struct net_device *netdev,
371 struct ethtool_drvinfo *drvinfo)
372{
373 struct xgbe_prv_data *pdata = netdev_priv(netdev);
374
375 strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
376 strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
377 strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
378 sizeof(drvinfo->bus_info));
379 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
380 XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
381 XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
382 XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
383 drvinfo->n_stats = XGBE_STATS_COUNT;
384}
385
386static int xgbe_get_coalesce(struct net_device *netdev,
387 struct ethtool_coalesce *ec)
388{
389 struct xgbe_prv_data *pdata = netdev_priv(netdev);
390 struct xgbe_hw_if *hw_if = &pdata->hw_if;
391 unsigned int riwt;
392
393 DBGPR("-->xgbe_get_coalesce\n");
394
395 memset(ec, 0, sizeof(struct ethtool_coalesce));
396
397 riwt = pdata->rx_riwt;
398 ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
399 ec->rx_max_coalesced_frames = pdata->rx_frames;
400
401 ec->tx_coalesce_usecs = pdata->tx_usecs;
402 ec->tx_max_coalesced_frames = pdata->tx_frames;
403
404 DBGPR("<--xgbe_get_coalesce\n");
405
406 return 0;
407}
408
409static int xgbe_set_coalesce(struct net_device *netdev,
410 struct ethtool_coalesce *ec)
411{
412 struct xgbe_prv_data *pdata = netdev_priv(netdev);
413 struct xgbe_hw_if *hw_if = &pdata->hw_if;
414 unsigned int rx_frames, rx_riwt, rx_usecs;
415 unsigned int tx_frames, tx_usecs;
416
417 DBGPR("-->xgbe_set_coalesce\n");
418
 419	/* Check for unsupported parameters */
420 if ((ec->rx_coalesce_usecs_irq) ||
421 (ec->rx_max_coalesced_frames_irq) ||
422 (ec->tx_coalesce_usecs_irq) ||
423 (ec->tx_max_coalesced_frames_irq) ||
424 (ec->stats_block_coalesce_usecs) ||
425 (ec->use_adaptive_rx_coalesce) ||
426 (ec->use_adaptive_tx_coalesce) ||
427 (ec->pkt_rate_low) ||
428 (ec->rx_coalesce_usecs_low) ||
429 (ec->rx_max_coalesced_frames_low) ||
430 (ec->tx_coalesce_usecs_low) ||
431 (ec->tx_max_coalesced_frames_low) ||
432 (ec->pkt_rate_high) ||
433 (ec->rx_coalesce_usecs_high) ||
434 (ec->rx_max_coalesced_frames_high) ||
435 (ec->tx_coalesce_usecs_high) ||
436 (ec->tx_max_coalesced_frames_high) ||
437 (ec->rate_sample_interval))
438 return -EOPNOTSUPP;
439
440 /* Can only change rx-frames when interface is down (see
441 * rx_descriptor_init in xgbe-dev.c)
442 */
443 rx_frames = pdata->rx_frames;
444 if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
445 netdev_alert(netdev,
446 "interface must be down to change rx-frames\n");
447 return -EINVAL;
448 }
449
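	/* Convert the requested usecs into the DMA Rx interrupt watchdog
	 * timer (RIWT) units programmed into the hardware; the exact
	 * scaling is implemented by the usec_to_riwt/riwt_to_usec hw ops.
	 */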
450 rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
451 rx_frames = ec->rx_max_coalesced_frames;
452
453 /* Use smallest possible value if conversion resulted in zero */
454 if (ec->rx_coalesce_usecs && !rx_riwt)
455 rx_riwt = 1;
456
457 /* Check the bounds of values for Rx */
458 if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
459 rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
460 netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
461 rx_usecs);
462 return -EINVAL;
463 }
464 if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
465 netdev_alert(netdev, "rx-frames is limited to %d frames\n",
466 pdata->channel->rx_ring->rdesc_count);
467 return -EINVAL;
468 }
469
470 tx_usecs = ec->tx_coalesce_usecs;
471 tx_frames = ec->tx_max_coalesced_frames;
472
473 /* Check the bounds of values for Tx */
474 if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
475 netdev_alert(netdev, "tx-frames is limited to %d frames\n",
476 pdata->channel->tx_ring->rdesc_count);
477 return -EINVAL;
478 }
479
480 pdata->rx_riwt = rx_riwt;
481 pdata->rx_frames = rx_frames;
482 hw_if->config_rx_coalesce(pdata);
483
484 pdata->tx_usecs = tx_usecs;
485 pdata->tx_frames = tx_frames;
486 hw_if->config_tx_coalesce(pdata);
487
488 DBGPR("<--xgbe_set_coalesce\n");
489
490 return 0;
491}
492
493static const struct ethtool_ops xgbe_ethtool_ops = {
494 .get_settings = xgbe_get_settings,
495 .set_settings = xgbe_set_settings,
496 .get_drvinfo = xgbe_get_drvinfo,
497 .get_link = ethtool_op_get_link,
498 .get_coalesce = xgbe_get_coalesce,
499 .set_coalesce = xgbe_set_coalesce,
500 .get_pauseparam = xgbe_get_pauseparam,
501 .set_pauseparam = xgbe_set_pauseparam,
502 .get_strings = xgbe_get_strings,
503 .get_ethtool_stats = xgbe_get_ethtool_stats,
504 .get_sset_count = xgbe_get_sset_count,
505};
506
507struct ethtool_ops *xgbe_get_ethtool_ops(void)
508{
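	/* Cast away the const qualifier to match the function's non-const
	 * return type; the ops table itself is never modified.
	 */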
509 return (struct ethtool_ops *)&xgbe_ethtool_ops;
510}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 000000000000..c83584a26713
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,512 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/module.h>
118#include <linux/device.h>
119#include <linux/platform_device.h>
120#include <linux/spinlock.h>
121#include <linux/netdevice.h>
122#include <linux/etherdevice.h>
123#include <linux/io.h>
124#include <linux/of.h>
125#include <linux/of_net.h>
126#include <linux/clk.h>
127
128#include "xgbe.h"
129#include "xgbe-common.h"
130
131
132MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
133MODULE_LICENSE("Dual BSD/GPL");
134MODULE_VERSION(XGBE_DRV_VERSION);
135MODULE_DESCRIPTION(XGBE_DRV_DESC);
136
137static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
138{
139 struct xgbe_channel *channel_mem, *channel;
140 struct xgbe_ring *tx_ring, *rx_ring;
141 unsigned int count, i;
142
143 DBGPR("-->xgbe_alloc_rings\n");
144
145 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
146
147 channel_mem = devm_kcalloc(pdata->dev, count,
148 sizeof(struct xgbe_channel), GFP_KERNEL);
149 if (!channel_mem)
150 return NULL;
151
152 tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
153 sizeof(struct xgbe_ring), GFP_KERNEL);
154 if (!tx_ring)
155 return NULL;
156
157 rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
158 sizeof(struct xgbe_ring), GFP_KERNEL);
159 if (!rx_ring)
160 return NULL;
161
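	/* Pair each channel with a Tx and/or Rx ring; when the Tx and Rx
	 * ring counts differ, the trailing channels carry only one ring
	 * type.
	 */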
162 for (i = 0, channel = channel_mem; i < count; i++, channel++) {
163 snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
164 channel->pdata = pdata;
165 channel->queue_index = i;
166 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
167 (DMA_CH_INC * i);
168
169 if (i < pdata->tx_ring_count) {
170 spin_lock_init(&tx_ring->lock);
171 channel->tx_ring = tx_ring++;
172 }
173
174 if (i < pdata->rx_ring_count) {
 175			spin_lock_init(&rx_ring->lock);
176 channel->rx_ring = rx_ring++;
177 }
178
179 DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
180 channel->name, channel->queue_index, channel->dma_regs,
181 channel->tx_ring, channel->rx_ring);
182 }
183
184 pdata->channel_count = count;
185
186 DBGPR("<--xgbe_alloc_rings\n");
187
188 return channel_mem;
189}
190
191static void xgbe_default_config(struct xgbe_prv_data *pdata)
192{
193 DBGPR("-->xgbe_default_config\n");
194
195 pdata->pblx8 = DMA_PBL_X8_ENABLE;
196 pdata->tx_sf_mode = MTL_TSF_ENABLE;
197 pdata->tx_threshold = MTL_TX_THRESHOLD_64;
198 pdata->tx_pbl = DMA_PBL_16;
199 pdata->tx_osp_mode = DMA_OSP_ENABLE;
200 pdata->rx_sf_mode = MTL_RSF_DISABLE;
201 pdata->rx_threshold = MTL_RX_THRESHOLD_64;
202 pdata->rx_pbl = DMA_PBL_16;
203 pdata->pause_autoneg = 1;
204 pdata->tx_pause = 1;
205 pdata->rx_pause = 1;
206 pdata->power_down = 0;
207 pdata->default_autoneg = AUTONEG_ENABLE;
208 pdata->default_speed = SPEED_10000;
209
210 DBGPR("<--xgbe_default_config\n");
211}
212
213static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
214{
215 xgbe_init_function_ptrs_dev(&pdata->hw_if);
216 xgbe_init_function_ptrs_desc(&pdata->desc_if);
217}
218
219static int xgbe_probe(struct platform_device *pdev)
220{
221 struct xgbe_prv_data *pdata;
222 struct xgbe_hw_if *hw_if;
223 struct xgbe_desc_if *desc_if;
224 struct net_device *netdev;
225 struct device *dev = &pdev->dev;
226 struct resource *res;
227 const u8 *mac_addr;
228 int ret;
229
230 DBGPR("--> xgbe_probe\n");
231
232 netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
233 XGBE_MAX_DMA_CHANNELS);
234 if (!netdev) {
235 dev_err(dev, "alloc_etherdev failed\n");
236 ret = -ENOMEM;
237 goto err_alloc;
238 }
239 SET_NETDEV_DEV(netdev, dev);
240 pdata = netdev_priv(netdev);
241 pdata->netdev = netdev;
242 pdata->pdev = pdev;
243 pdata->dev = dev;
244 platform_set_drvdata(pdev, netdev);
245
246 spin_lock_init(&pdata->lock);
247 mutex_init(&pdata->xpcs_mutex);
248
249 /* Set and validate the number of descriptors for a ring */
250 BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
251 pdata->tx_desc_count = TX_DESC_CNT;
252 if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
253 dev_err(dev, "tx descriptor count (%d) is not valid\n",
254 pdata->tx_desc_count);
255 ret = -EINVAL;
256 goto err_io;
257 }
258 BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
259 pdata->rx_desc_count = RX_DESC_CNT;
260 if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
261 dev_err(dev, "rx descriptor count (%d) is not valid\n",
262 pdata->rx_desc_count);
263 ret = -EINVAL;
264 goto err_io;
265 }
266
267 /* Obtain the system clock setting */
268 pdata->sysclock = devm_clk_get(dev, NULL);
269 if (IS_ERR(pdata->sysclock)) {
270 dev_err(dev, "devm_clk_get failed\n");
271 ret = PTR_ERR(pdata->sysclock);
272 goto err_io;
273 }
274
275 /* Obtain the mmio areas for the device */
276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
277 pdata->xgmac_regs = devm_ioremap_resource(dev, res);
278 if (IS_ERR(pdata->xgmac_regs)) {
279 dev_err(dev, "xgmac ioremap failed\n");
280 ret = PTR_ERR(pdata->xgmac_regs);
281 goto err_io;
282 }
283 DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
284
285 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
286 pdata->xpcs_regs = devm_ioremap_resource(dev, res);
287 if (IS_ERR(pdata->xpcs_regs)) {
288 dev_err(dev, "xpcs ioremap failed\n");
289 ret = PTR_ERR(pdata->xpcs_regs);
290 goto err_io;
291 }
292 DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
293
294 /* Set the DMA mask */
295 if (!dev->dma_mask)
296 dev->dma_mask = &dev->coherent_dma_mask;
297 *(dev->dma_mask) = DMA_BIT_MASK(40);
298 dev->coherent_dma_mask = DMA_BIT_MASK(40);
299
300 ret = platform_get_irq(pdev, 0);
301 if (ret < 0) {
302 dev_err(dev, "platform_get_irq failed\n");
303 goto err_io;
304 }
305 netdev->irq = ret;
306 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
307
308 /* Set all the function pointers */
309 xgbe_init_all_fptrs(pdata);
310 hw_if = &pdata->hw_if;
311 desc_if = &pdata->desc_if;
312
313 /* Issue software reset to device */
314 hw_if->exit(pdata);
315
316 /* Populate the hardware features */
317 xgbe_get_all_hw_features(pdata);
318
319 /* Retrieve the MAC address */
320 mac_addr = of_get_mac_address(dev->of_node);
321 if (!mac_addr) {
322 dev_err(dev, "invalid mac address for this device\n");
323 ret = -EINVAL;
324 goto err_io;
325 }
326 memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
327
328 /* Retrieve the PHY mode - it must be "xgmii" */
329 pdata->phy_mode = of_get_phy_mode(dev->of_node);
330 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
331 dev_err(dev, "invalid phy-mode specified for this device\n");
332 ret = -EINVAL;
333 goto err_io;
334 }
335
336 /* Set default configuration data */
337 xgbe_default_config(pdata);
338
339 /* Calculate the number of Tx and Rx rings to be created */
340 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
341 pdata->hw_feat.tx_ch_cnt);
 342	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
 343	if (ret) {
 344		dev_err(dev, "error setting real tx queue count\n");
 345		goto err_io;
	}
346
347 pdata->rx_ring_count = min_t(unsigned int,
348 netif_get_num_default_rss_queues(),
349 pdata->hw_feat.rx_ch_cnt);
350 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
351 if (ret) {
352 dev_err(dev, "error setting real rx queue count\n");
353 goto err_io;
354 }
355
356 /* Allocate the rings for the DMA channels */
357 pdata->channel = xgbe_alloc_rings(pdata);
358 if (!pdata->channel) {
359 dev_err(dev, "ring allocation failed\n");
360 ret = -ENOMEM;
361 goto err_io;
362 }
363
 364	/* Prepare to register with MDIO */
365 pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
366 if (!pdata->mii_bus_id) {
367 dev_err(dev, "failed to allocate mii bus id\n");
368 ret = -ENOMEM;
369 goto err_io;
370 }
371 ret = xgbe_mdio_register(pdata);
372 if (ret)
373 goto err_bus_id;
374
375 /* Set network and ethtool operations */
376 netdev->netdev_ops = xgbe_get_netdev_ops();
377 netdev->ethtool_ops = xgbe_get_ethtool_ops();
378
379 /* Set device features */
380 netdev->hw_features = NETIF_F_SG |
381 NETIF_F_IP_CSUM |
382 NETIF_F_IPV6_CSUM |
383 NETIF_F_RXCSUM |
384 NETIF_F_TSO |
385 NETIF_F_TSO6 |
386 NETIF_F_GRO |
387 NETIF_F_HW_VLAN_CTAG_RX |
388 NETIF_F_HW_VLAN_CTAG_TX;
389
390 netdev->vlan_features |= NETIF_F_SG |
391 NETIF_F_IP_CSUM |
392 NETIF_F_IPV6_CSUM |
393 NETIF_F_TSO |
394 NETIF_F_TSO6;
395
396 netdev->features |= netdev->hw_features;
397 pdata->netdev_features = netdev->features;
398
399 xgbe_init_rx_coalesce(pdata);
400 xgbe_init_tx_coalesce(pdata);
401
402 netif_carrier_off(netdev);
403 ret = register_netdev(netdev);
404 if (ret) {
405 dev_err(dev, "net device registration failed\n");
406 goto err_reg_netdev;
407 }
408
409 xgbe_debugfs_init(pdata);
410
411 netdev_notice(netdev, "net device enabled\n");
412
413 DBGPR("<-- xgbe_probe\n");
414
415 return 0;
416
417err_reg_netdev:
418 xgbe_mdio_unregister(pdata);
419
420err_bus_id:
421 kfree(pdata->mii_bus_id);
422
423err_io:
424 free_netdev(netdev);
425
426err_alloc:
427 dev_notice(dev, "net device not enabled\n");
428
429 return ret;
430}
431
432static int xgbe_remove(struct platform_device *pdev)
433{
434 struct net_device *netdev = platform_get_drvdata(pdev);
435 struct xgbe_prv_data *pdata = netdev_priv(netdev);
436
437 DBGPR("-->xgbe_remove\n");
438
439 xgbe_debugfs_exit(pdata);
440
441 unregister_netdev(netdev);
442
443 xgbe_mdio_unregister(pdata);
444
445 kfree(pdata->mii_bus_id);
446
447 free_netdev(netdev);
448
449 DBGPR("<--xgbe_remove\n");
450
451 return 0;
452}
453
454#ifdef CONFIG_PM
455static int xgbe_suspend(struct device *dev)
456{
457 struct net_device *netdev = dev_get_drvdata(dev);
458 int ret;
459
460 DBGPR("-->xgbe_suspend\n");
461
462 if (!netif_running(netdev)) {
 463		DBGPR("<--xgbe_suspend\n");
464 return -EINVAL;
465 }
466
467 ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
468
469 DBGPR("<--xgbe_suspend\n");
470
471 return ret;
472}
473
474static int xgbe_resume(struct device *dev)
475{
476 struct net_device *netdev = dev_get_drvdata(dev);
477 int ret;
478
479 DBGPR("-->xgbe_resume\n");
480
481 if (!netif_running(netdev)) {
 482		DBGPR("<--xgbe_resume\n");
483 return -EINVAL;
484 }
485
486 ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
487
488 DBGPR("<--xgbe_resume\n");
489
490 return ret;
491}
492#endif /* CONFIG_PM */
493
494static const struct of_device_id xgbe_of_match[] = {
495 { .compatible = "amd,xgbe-seattle-v1a", },
496 {},
497};
498
499MODULE_DEVICE_TABLE(of, xgbe_of_match);
500static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
501
502static struct platform_driver xgbe_driver = {
503 .driver = {
504 .name = "amd-xgbe",
505 .of_match_table = xgbe_of_match,
506 .pm = &xgbe_pm_ops,
507 },
508 .probe = xgbe_probe,
509 .remove = xgbe_remove,
510};
511
512module_platform_driver(xgbe_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 000000000000..ea7a5d6750ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,433 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/module.h>
118#include <linux/kmod.h>
119#include <linux/spinlock.h>
120#include <linux/mdio.h>
121#include <linux/phy.h>
122#include <linux/of.h>
123
124#include "xgbe.h"
125#include "xgbe-common.h"
126
127
128static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
129{
130 struct xgbe_prv_data *pdata = mii->priv;
131 struct xgbe_hw_if *hw_if = &pdata->hw_if;
132 int mmd_data;
133
134 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
135 prtad, mmd_reg);
136
137 mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
138
139 DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
140
141 return mmd_data;
142}
143
144static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
145 u16 mmd_val)
146{
147 struct xgbe_prv_data *pdata = mii->priv;
148 struct xgbe_hw_if *hw_if = &pdata->hw_if;
149 int mmd_data = mmd_val;
150
151 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
152 prtad, mmd_reg, mmd_data);
153
154 hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
155
156 DBGPR_MDIO("<--xgbe_mdio_write\n");
157
158 return 0;
159}
160
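/* phylib link-change callback: invoked whenever the PHY state machine
 * reports a change, this re-applies the flow control and speed
 * configuration to the MAC so it matches the current PHY state.
 */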
161static void xgbe_adjust_link(struct net_device *netdev)
162{
163 struct xgbe_prv_data *pdata = netdev_priv(netdev);
164 struct xgbe_hw_if *hw_if = &pdata->hw_if;
165 struct phy_device *phydev = pdata->phydev;
166 unsigned long flags;
167 int new_state = 0;
168
 169	if (!phydev)
170 return;
171
172 DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
173 phydev->addr, phydev->link, pdata->phy_link);
174
175 spin_lock_irqsave(&pdata->lock, flags);
176
177 if (phydev->link) {
178 /* Flow control support */
179 if (pdata->pause_autoneg) {
180 if (phydev->pause || phydev->asym_pause) {
181 pdata->tx_pause = 1;
182 pdata->rx_pause = 1;
183 } else {
184 pdata->tx_pause = 0;
185 pdata->rx_pause = 0;
186 }
187 }
188
189 if (pdata->tx_pause != pdata->phy_tx_pause) {
190 hw_if->config_tx_flow_control(pdata);
191 pdata->phy_tx_pause = pdata->tx_pause;
192 }
193
194 if (pdata->rx_pause != pdata->phy_rx_pause) {
195 hw_if->config_rx_flow_control(pdata);
196 pdata->phy_rx_pause = pdata->rx_pause;
197 }
198
199 /* Speed support */
200 if (phydev->speed != pdata->phy_speed) {
201 new_state = 1;
202
203 switch (phydev->speed) {
204 case SPEED_10000:
205 hw_if->set_xgmii_speed(pdata);
206 break;
207
208 case SPEED_2500:
209 hw_if->set_gmii_2500_speed(pdata);
210 break;
211
212 case SPEED_1000:
213 hw_if->set_gmii_speed(pdata);
214 break;
215 }
216 pdata->phy_speed = phydev->speed;
217 }
218
219 if (phydev->link != pdata->phy_link) {
220 new_state = 1;
221 pdata->phy_link = 1;
222 }
223 } else if (pdata->phy_link) {
224 new_state = 1;
225 pdata->phy_link = 0;
226 pdata->phy_speed = SPEED_UNKNOWN;
227 }
228
229 if (new_state)
230 phy_print_status(phydev);
231
232 spin_unlock_irqrestore(&pdata->lock, flags);
233
234 DBGPR_MDIO("<--xgbe_adjust_link\n");
235}
236
237void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
238{
239 struct device *dev = pdata->dev;
240 struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
241 int i;
242
243 dev_alert(dev, "\n************* PHY Reg dump **********************\n");
244
245 dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
246 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
247 dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
248 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
249 dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
250 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
251 dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
252 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
253 dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
254 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
255 dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
256 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
257
258 dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
259 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
260 dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
261 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
262 dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
263 MDIO_AN_ADVERTISE,
264 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
265 dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
266 MDIO_AN_ADVERTISE + 1,
267 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
268 dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
269 MDIO_AN_ADVERTISE + 2,
270 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
271 dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
272 MDIO_AN_COMP_STAT,
273 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
274
275 dev_alert(dev, "MMD Device Mask = %#x\n",
276 phydev->c45_ids.devices_in_package);
277 for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
278 dev_alert(dev, " MMD %d: ID = %#08x\n", i,
279 phydev->c45_ids.device_ids[i]);
280
281 dev_alert(dev, "\n*************************************************\n");
282}
283
284int xgbe_mdio_register(struct xgbe_prv_data *pdata)
285{
286 struct net_device *netdev = pdata->netdev;
287 struct device_node *phy_node;
288 struct mii_bus *mii;
289 struct phy_device *phydev;
290 int ret = 0;
291
292 DBGPR("-->xgbe_mdio_register\n");
293
294 /* Retrieve the phy-handle */
295 phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
296 if (!phy_node) {
297 dev_err(pdata->dev, "unable to parse phy-handle\n");
298 return -EINVAL;
299 }
300
301 /* Register with the MDIO bus */
302 mii = mdiobus_alloc();
 303	if (!mii) {
304 dev_err(pdata->dev, "mdiobus_alloc failed\n");
305 ret = -ENOMEM;
306 goto err_node_get;
307 }
308
309 /* Register on the MDIO bus (don't probe any PHYs) */
310 mii->name = XGBE_PHY_NAME;
311 mii->read = xgbe_mdio_read;
312 mii->write = xgbe_mdio_write;
313 snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
314 mii->priv = pdata;
315 mii->phy_mask = ~0;
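	/* A mask of ~0 keeps mdiobus_register() from auto-probing any
	 * address; the PCS device is probed explicitly below with
	 * get_phy_device().
	 */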
316 mii->parent = pdata->dev;
317 ret = mdiobus_register(mii);
318 if (ret) {
319 dev_err(pdata->dev, "mdiobus_register failed\n");
320 goto err_mdiobus_alloc;
321 }
322 DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
323
324 /* Probe the PCS using Clause 45 */
325 phydev = get_phy_device(mii, XGBE_PRTAD, true);
326 if (IS_ERR(phydev) || !phydev ||
327 !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
328 dev_err(pdata->dev, "get_phy_device failed\n");
 329		ret = IS_ERR(phydev) ? PTR_ERR(phydev) : -ENOLINK;
330 goto err_mdiobus_register;
331 }
332 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
333 MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
334
335 of_node_get(phy_node);
336 phydev->dev.of_node = phy_node;
337 ret = phy_device_register(phydev);
338 if (ret) {
339 dev_err(pdata->dev, "phy_device_register failed\n");
340 of_node_put(phy_node);
341 goto err_phy_device;
342 }
343
344 /* Add a reference to the PHY driver so it can't be unloaded */
345 pdata->phy_module = phydev->dev.driver ?
346 phydev->dev.driver->owner : NULL;
347 if (!try_module_get(pdata->phy_module)) {
348 dev_err(pdata->dev, "try_module_get failed\n");
349 ret = -EIO;
350 goto err_phy_device;
351 }
352
353 pdata->mii = mii;
354 pdata->mdio_mmd = MDIO_MMD_PCS;
355
356 pdata->phy_link = -1;
357 pdata->phy_speed = SPEED_UNKNOWN;
358 pdata->phy_tx_pause = pdata->tx_pause;
359 pdata->phy_rx_pause = pdata->rx_pause;
360
361 ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
362 pdata->phy_mode);
363 if (ret) {
364 netdev_err(netdev, "phy_connect_direct failed\n");
365 goto err_phy_device;
366 }
367
368 if (!phydev->drv || (phydev->drv->phy_id == 0)) {
369 netdev_err(netdev, "phy_id not valid\n");
370 ret = -ENODEV;
371 goto err_phy_connect;
372 }
373 DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
374 dev_name(&phydev->dev), phydev->link);
375
376 phydev->autoneg = pdata->default_autoneg;
377 if (phydev->autoneg == AUTONEG_DISABLE) {
378 /* Add settings needed to force speed */
379 phydev->supported |= SUPPORTED_1000baseT_Full;
380 phydev->supported |= SUPPORTED_10000baseT_Full;
381
382 phydev->speed = pdata->default_speed;
383 phydev->duplex = DUPLEX_FULL;
384
385 phydev->advertising &= ~ADVERTISED_Autoneg;
386 }
387
388 pdata->phydev = phydev;
389
390 of_node_put(phy_node);
391
392 DBGPHY_REGS(pdata);
393
394 DBGPR("<--xgbe_mdio_register\n");
395
396 return 0;
397
398err_phy_connect:
399 phy_disconnect(phydev);
400
401err_phy_device:
402 phy_device_free(phydev);
403
404err_mdiobus_register:
405 mdiobus_unregister(mii);
406
407err_mdiobus_alloc:
408 mdiobus_free(mii);
409
410err_node_get:
411 of_node_put(phy_node);
412
413 return ret;
414}
415
416void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
417{
418 DBGPR("-->xgbe_mdio_unregister\n");
419
420 phy_disconnect(pdata->phydev);
421 pdata->phydev = NULL;
422
423 module_put(pdata->phy_module);
424 pdata->phy_module = NULL;
425
426 mdiobus_unregister(pdata->mii);
427 pdata->mii->priv = NULL;
428
429 mdiobus_free(pdata->mii);
430 pdata->mii = NULL;
431
432 DBGPR("<--xgbe_mdio_unregister\n");
433}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 000000000000..ab0627162c01
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,676 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#ifndef __XGBE_H__
118#define __XGBE_H__
119
120#include <linux/dma-mapping.h>
121#include <linux/netdevice.h>
122#include <linux/workqueue.h>
123#include <linux/phy.h>
124
125
126#define XGBE_DRV_NAME "amd-xgbe"
127#define XGBE_DRV_VERSION "1.0.0-a"
128#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
129
130/* Descriptor related defines */
131#define TX_DESC_CNT 512
132#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3)
133#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1)
134#define RX_DESC_CNT 512
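/* With TX_DESC_CNT = 512, TX_DESC_MIN_FREE works out to 64 descriptors
 * and TX_DESC_MAX_PROC to 256; the ring sizes must remain powers of
 * two (enforced via BUILD_BUG_ON_NOT_POWER_OF_2 in xgbe_probe).
 */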
135
136#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
137
138#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
139#define RX_BUF_ALIGN 64
140
141#define XGBE_MAX_DMA_CHANNELS 16
142#define DMA_ARDOMAIN_SETTING 0x2
143#define DMA_ARCACHE_SETTING 0xb
144#define DMA_AWDOMAIN_SETTING 0x2
145#define DMA_AWCACHE_SETTING 0x7
146#define DMA_INTERRUPT_MASK 0x31c7
147
148#define XGMAC_MIN_PACKET 60
149#define XGMAC_STD_PACKET_MTU 1500
150#define XGMAC_MAX_STD_PACKET 1518
151#define XGMAC_JUMBO_PACKET_MTU 9000
152#define XGMAC_MAX_JUMBO_PACKET 9018
153
154#define MAX_MULTICAST_LIST 14
155#define TX_FLAGS_IP_PKT 0x00000001
156#define TX_FLAGS_TCP_PKT 0x00000002
157
158/* MDIO bus phy name */
159#define XGBE_PHY_NAME "amd_xgbe_phy"
160#define XGBE_PRTAD 0
161
162/* Driver PMT macros */
163#define XGMAC_DRIVER_CONTEXT 1
164#define XGMAC_IOCTL_CONTEXT 2
165
166#define FIFO_SIZE_B(x) (x)
167#define FIFO_SIZE_KB(x)		((x) * 1024)
168
169#define XGBE_TC_CNT 2
170
171/* Helper macro for descriptor handling
172 * Always use GET_DESC_DATA to access the descriptor data
173 *  since the index is free-running and needs to be AND-ed
174 *  with the ring's descriptor count minus one (a power-of-two
175 *  mask) to index the proper descriptor data.
176 */
177#define GET_DESC_DATA(_ring, _idx) \
178 ((_ring)->rdata + \
179 ((_idx) & ((_ring)->rdesc_count - 1)))
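/* Example: for a hypothetical ring with rdesc_count = 512 and a
 * free-running index of 515, GET_DESC_DATA(ring, 515) masks with
 * (512 - 1) and yields &ring->rdata[3], wrapping the index without a
 * modulo operation.
 */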
180
181
182/* Default coalescing parameters */
183#define XGMAC_INIT_DMA_TX_USECS 100
184#define XGMAC_INIT_DMA_TX_FRAMES 16
185
186#define XGMAC_MAX_DMA_RIWT 0xff
187#define XGMAC_INIT_DMA_RX_USECS 100
188#define XGMAC_INIT_DMA_RX_FRAMES 16
189
190/* Flow control queue count */
191#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
192
193
194struct xgbe_prv_data;
195
196struct xgbe_packet_data {
197 unsigned int attributes;
198
199 unsigned int errors;
200
201 unsigned int rdesc_count;
202 unsigned int length;
203
204 unsigned int header_len;
205 unsigned int tcp_header_len;
206 unsigned int tcp_payload_len;
207 unsigned short mss;
208
209 unsigned short vlan_ctag;
210};
211
212/* Common Rx and Tx descriptor mapping */
213struct xgbe_ring_desc {
214 unsigned int desc0;
215 unsigned int desc1;
216 unsigned int desc2;
217 unsigned int desc3;
218};
219
220/* Structure used to hold information related to the descriptor
221 * and the packet associated with the descriptor (always use
222 * the GET_DESC_DATA macro to access this data from the ring)
223 */
224struct xgbe_ring_data {
225 struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
226 dma_addr_t rdesc_dma; /* DMA address of descriptor */
227
228 struct sk_buff *skb; /* Virtual address of SKB */
229 dma_addr_t skb_dma; /* DMA address of SKB data */
230 unsigned int skb_dma_len; /* Length of SKB DMA area */
231 unsigned int tso_header; /* TSO header indicator */
232
233 unsigned short len; /* Length of received Rx packet */
234
235 unsigned int interrupt; /* Interrupt indicator */
236
237 unsigned int mapped_as_page;
238};
239
240struct xgbe_ring {
241 /* Ring lock - used just for TX rings at the moment */
242 spinlock_t lock;
243
244 /* Per packet related information */
245 struct xgbe_packet_data packet_data;
246
247 /* Virtual/DMA addresses and count of allocated descriptor memory */
248 struct xgbe_ring_desc *rdesc;
249 dma_addr_t rdesc_dma;
250 unsigned int rdesc_count;
251
252	/* Array of descriptor data corresponding to the descriptor memory
253 * (always use the GET_DESC_DATA macro to access this data)
254 */
255 struct xgbe_ring_data *rdata;
256
257 /* Ring index values
258 * cur - Tx: index of descriptor to be used for current transfer
259 * Rx: index of descriptor to check for packet availability
260 * dirty - Tx: index of descriptor to check for transfer complete
261 * Rx: count of descriptors in which a packet has been received
262 * (used with skb_realloc_index to refresh the ring)
263 */
264 unsigned int cur;
265 unsigned int dirty;
266
267 /* Coalesce frame count used for interrupt bit setting */
268 unsigned int coalesce_count;
269
270 union {
271 struct {
272 unsigned int queue_stopped;
273 unsigned short cur_mss;
274 unsigned short cur_vlan_ctag;
275 } tx;
276
277 struct {
278 unsigned int realloc_index;
279 unsigned int realloc_threshold;
280 } rx;
281 };
282} ____cacheline_aligned;
283
284/* Structure used to describe the descriptor rings associated with
285 * a DMA channel.
286 */
287struct xgbe_channel {
288 char name[16];
289
290 /* Address of private data area for device */
291 struct xgbe_prv_data *pdata;
292
293 /* Queue index and base address of queue's DMA registers */
294 unsigned int queue_index;
295 void __iomem *dma_regs;
296
297 unsigned int saved_ier;
298
299 unsigned int tx_timer_active;
300 struct hrtimer tx_timer;
301
302 struct xgbe_ring *tx_ring;
303 struct xgbe_ring *rx_ring;
304} ____cacheline_aligned;
305
306enum xgbe_int {
307 XGMAC_INT_DMA_ISR_DC0IS,
308 XGMAC_INT_DMA_CH_SR_TI,
309 XGMAC_INT_DMA_CH_SR_TPS,
310 XGMAC_INT_DMA_CH_SR_TBU,
311 XGMAC_INT_DMA_CH_SR_RI,
312 XGMAC_INT_DMA_CH_SR_RBU,
313 XGMAC_INT_DMA_CH_SR_RPS,
314 XGMAC_INT_DMA_CH_SR_FBE,
315 XGMAC_INT_DMA_ALL,
316};
317
318enum xgbe_int_state {
319 XGMAC_INT_STATE_SAVE,
320 XGMAC_INT_STATE_RESTORE,
321};
322
323enum xgbe_mtl_fifo_size {
324 XGMAC_MTL_FIFO_SIZE_256 = 0x00,
325 XGMAC_MTL_FIFO_SIZE_512 = 0x01,
326 XGMAC_MTL_FIFO_SIZE_1K = 0x03,
327 XGMAC_MTL_FIFO_SIZE_2K = 0x07,
328 XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
329 XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
330 XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
331 XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
332 XGMAC_MTL_FIFO_SIZE_64K = 0xff,
333 XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
334 XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
335};
336
337struct xgbe_mmc_stats {
338 /* Tx Stats */
339 u64 txoctetcount_gb;
340 u64 txframecount_gb;
341 u64 txbroadcastframes_g;
342 u64 txmulticastframes_g;
343 u64 tx64octets_gb;
344 u64 tx65to127octets_gb;
345 u64 tx128to255octets_gb;
346 u64 tx256to511octets_gb;
347 u64 tx512to1023octets_gb;
348 u64 tx1024tomaxoctets_gb;
349 u64 txunicastframes_gb;
350 u64 txmulticastframes_gb;
351 u64 txbroadcastframes_gb;
352 u64 txunderflowerror;
353 u64 txoctetcount_g;
354 u64 txframecount_g;
355 u64 txpauseframes;
356 u64 txvlanframes_g;
357
358 /* Rx Stats */
359 u64 rxframecount_gb;
360 u64 rxoctetcount_gb;
361 u64 rxoctetcount_g;
362 u64 rxbroadcastframes_g;
363 u64 rxmulticastframes_g;
364 u64 rxcrcerror;
365 u64 rxrunterror;
366 u64 rxjabbererror;
367 u64 rxundersize_g;
368 u64 rxoversize_g;
369 u64 rx64octets_gb;
370 u64 rx65to127octets_gb;
371 u64 rx128to255octets_gb;
372 u64 rx256to511octets_gb;
373 u64 rx512to1023octets_gb;
374 u64 rx1024tomaxoctets_gb;
375 u64 rxunicastframes_g;
376 u64 rxlengtherror;
377 u64 rxoutofrangetype;
378 u64 rxpauseframes;
379 u64 rxfifooverflow;
380 u64 rxvlanframes_gb;
381 u64 rxwatchdogerror;
382};
383
384struct xgbe_hw_if {
385 int (*tx_complete)(struct xgbe_ring_desc *);
386
387 int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
388 int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
389 int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
390 int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
391
392 int (*enable_rx_csum)(struct xgbe_prv_data *);
393 int (*disable_rx_csum)(struct xgbe_prv_data *);
394
395 int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
396 int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
397
398 int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
399 void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
400 int (*set_gmii_speed)(struct xgbe_prv_data *);
401 int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
402 int (*set_xgmii_speed)(struct xgbe_prv_data *);
403
404 void (*enable_tx)(struct xgbe_prv_data *);
405 void (*disable_tx)(struct xgbe_prv_data *);
406 void (*enable_rx)(struct xgbe_prv_data *);
407 void (*disable_rx)(struct xgbe_prv_data *);
408
409 void (*powerup_tx)(struct xgbe_prv_data *);
410 void (*powerdown_tx)(struct xgbe_prv_data *);
411 void (*powerup_rx)(struct xgbe_prv_data *);
412 void (*powerdown_rx)(struct xgbe_prv_data *);
413
414 int (*init)(struct xgbe_prv_data *);
415 int (*exit)(struct xgbe_prv_data *);
416
417 int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
418 int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
419 void (*pre_xmit)(struct xgbe_channel *);
420 int (*dev_read)(struct xgbe_channel *);
421 void (*tx_desc_init)(struct xgbe_channel *);
422 void (*rx_desc_init)(struct xgbe_channel *);
423 void (*rx_desc_reset)(struct xgbe_ring_data *);
424 void (*tx_desc_reset)(struct xgbe_ring_data *);
425 int (*is_last_desc)(struct xgbe_ring_desc *);
426 int (*is_context_desc)(struct xgbe_ring_desc *);
427
428 /* For FLOW ctrl */
429 int (*config_tx_flow_control)(struct xgbe_prv_data *);
430 int (*config_rx_flow_control)(struct xgbe_prv_data *);
431
432 /* For RX coalescing */
433 int (*config_rx_coalesce)(struct xgbe_prv_data *);
434 int (*config_tx_coalesce)(struct xgbe_prv_data *);
435 unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
436 unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
437
438 /* For RX and TX threshold config */
439 int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
440 int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
441
442 /* For RX and TX Store and Forward Mode config */
443 int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
444 int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
445
446 /* For TX DMA Operate on Second Frame config */
447 int (*config_osp_mode)(struct xgbe_prv_data *);
448
449 /* For RX and TX PBL config */
450 int (*config_rx_pbl_val)(struct xgbe_prv_data *);
451 int (*get_rx_pbl_val)(struct xgbe_prv_data *);
452 int (*config_tx_pbl_val)(struct xgbe_prv_data *);
453 int (*get_tx_pbl_val)(struct xgbe_prv_data *);
454 int (*config_pblx8)(struct xgbe_prv_data *);
455
456 /* For MMC statistics */
457 void (*rx_mmc_int)(struct xgbe_prv_data *);
458 void (*tx_mmc_int)(struct xgbe_prv_data *);
459 void (*read_mmc_stats)(struct xgbe_prv_data *);
460};
461
462struct xgbe_desc_if {
463 int (*alloc_ring_resources)(struct xgbe_prv_data *);
464 void (*free_ring_resources)(struct xgbe_prv_data *);
465 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
466 void (*realloc_skb)(struct xgbe_channel *);
467 void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
468 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
469 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
470};
471
472/* This structure contains flags that indicate what hardware features
473 * or configurations are present in the device.
474 */
475struct xgbe_hw_features {
476 /* HW Feature Register0 */
477 unsigned int gmii; /* 1000 Mbps support */
478 unsigned int vlhash; /* VLAN Hash Filter */
479 unsigned int sma; /* SMA(MDIO) Interface */
480 unsigned int rwk; /* PMT remote wake-up packet */
481 unsigned int mgk; /* PMT magic packet */
482 unsigned int mmc; /* RMON module */
483 unsigned int aoe; /* ARP Offload */
 484	unsigned int ts;	/* IEEE 1588-2008 Advanced Timestamp */
485 unsigned int eee; /* Energy Efficient Ethernet */
486 unsigned int tx_coe; /* Tx Checksum Offload */
487 unsigned int rx_coe; /* Rx Checksum Offload */
488 unsigned int addn_mac; /* Additional MAC Addresses */
489 unsigned int ts_src; /* Timestamp Source */
490 unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
491
492 /* HW Feature Register1 */
493 unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
494 unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
495 unsigned int adv_ts_hi; /* Advance Timestamping High Word */
496 unsigned int dcb; /* DCB Feature */
497 unsigned int sph; /* Split Header Feature */
498 unsigned int tso; /* TCP Segmentation Offload */
499 unsigned int dma_debug; /* DMA Debug Registers */
500 unsigned int rss; /* Receive Side Scaling */
501 unsigned int hash_table_size; /* Hash Table Size */
502 unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
503
504 /* HW Feature Register2 */
505 unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
506 unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
507 unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
508 unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
509 unsigned int pps_out_num; /* Number of PPS outputs */
510 unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
511};
512
513struct xgbe_prv_data {
514 struct net_device *netdev;
515 struct platform_device *pdev;
516 struct device *dev;
517
518 /* XGMAC/XPCS related mmio registers */
519 void __iomem *xgmac_regs; /* XGMAC CSRs */
520 void __iomem *xpcs_regs; /* XPCS MMD registers */
521
522 /* Overall device lock */
523 spinlock_t lock;
524
525 /* XPCS indirect addressing mutex */
526 struct mutex xpcs_mutex;
527
528 int irq_number;
529
530 struct xgbe_hw_if hw_if;
531 struct xgbe_desc_if desc_if;
532
533 /* Rings for Tx/Rx on a DMA channel */
534 struct xgbe_channel *channel;
535 unsigned int channel_count;
536 unsigned int tx_ring_count;
537 unsigned int tx_desc_count;
538 unsigned int rx_ring_count;
539 unsigned int rx_desc_count;
540
541 /* Tx/Rx common settings */
542 unsigned int pblx8;
543
544 /* Tx settings */
545 unsigned int tx_sf_mode;
546 unsigned int tx_threshold;
547 unsigned int tx_pbl;
548 unsigned int tx_osp_mode;
549
550 /* Rx settings */
551 unsigned int rx_sf_mode;
552 unsigned int rx_threshold;
553 unsigned int rx_pbl;
554
555 /* Tx coalescing settings */
556 unsigned int tx_usecs;
557 unsigned int tx_frames;
558
559 /* Rx coalescing settings */
560 unsigned int rx_riwt;
561 unsigned int rx_frames;
562
563 /* Current MTU */
564 unsigned int rx_buf_size;
565
566 /* Flow control settings */
567 unsigned int pause_autoneg;
568 unsigned int tx_pause;
569 unsigned int rx_pause;
570
571 /* MDIO settings */
572 struct module *phy_module;
573 char *mii_bus_id;
574 struct mii_bus *mii;
575 int mdio_mmd;
576 struct phy_device *phydev;
577 int default_autoneg;
578 int default_speed;
579
580 /* Current PHY settings */
581 phy_interface_t phy_mode;
582 int phy_link;
583 int phy_speed;
584 unsigned int phy_tx_pause;
585 unsigned int phy_rx_pause;
586
587 /* Netdev related settings */
588 netdev_features_t netdev_features;
589 struct napi_struct napi;
590 struct xgbe_mmc_stats mmc_stats;
591
592 /* System clock value used for Rx watchdog */
593 struct clk *sysclock;
594
595 /* Hardware features of the device */
596 struct xgbe_hw_features hw_feat;
597
598 /* Device restart work structure */
599 struct work_struct restart_work;
600
601 /* Keeps track of power mode */
602 unsigned int power_down;
603
604#ifdef CONFIG_DEBUG_FS
605 struct dentry *xgbe_debugfs;
606
607 unsigned int debugfs_xgmac_reg;
608
609 unsigned int debugfs_xpcs_mmd;
610 unsigned int debugfs_xpcs_reg;
611#endif
612};
613
 614/* Function prototypes */
615
616void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
617void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
618struct net_device_ops *xgbe_get_netdev_ops(void);
619struct ethtool_ops *xgbe_get_ethtool_ops(void);
620
621int xgbe_mdio_register(struct xgbe_prv_data *);
622void xgbe_mdio_unregister(struct xgbe_prv_data *);
623void xgbe_dump_phy_registers(struct xgbe_prv_data *);
624void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
625 unsigned int);
626void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
627 unsigned int);
628void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
629void xgbe_get_all_hw_features(struct xgbe_prv_data *);
630int xgbe_powerup(struct net_device *, unsigned int);
631int xgbe_powerdown(struct net_device *, unsigned int);
632void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
633void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
634
635#ifdef CONFIG_DEBUG_FS
636void xgbe_debugfs_init(struct xgbe_prv_data *);
637void xgbe_debugfs_exit(struct xgbe_prv_data *);
638#else
639static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
640static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
641#endif /* CONFIG_DEBUG_FS */
642
643/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
644#if 0
645#define XGMAC_ENABLE_TX_DESC_DUMP
646#define XGMAC_ENABLE_RX_DESC_DUMP
647#endif
648
649/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
650#if 0
651#define XGMAC_ENABLE_TX_PKT_DUMP
652#define XGMAC_ENABLE_RX_PKT_DUMP
653#endif
654
655/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
656#if 0
657#define YDEBUG
658#define YDEBUG_MDIO
659#endif
660
661/* For debug prints */
662#ifdef YDEBUG
663#define DBGPR(x...) pr_alert(x)
664#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
665#else
666#define DBGPR(x...) do { } while (0)
667#define DBGPHY_REGS(x...) do { } while (0)
668#endif
669
670#ifdef YDEBUG_MDIO
671#define DBGPR_MDIO(x...) pr_alert(x)
672#else
673#define DBGPR_MDIO(x...) do { } while (0)
674#endif
675
676#endif
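
The xgbe_hw_if and xgbe_desc_if structures above are function-pointer "ops" tables: the core driver only ever calls through them, and xgbe_init_function_ptrs_dev()/xgbe_init_function_ptrs_desc() bind a concrete implementation at probe time. A minimal sketch of that pattern, using hypothetical demo_* names rather than anything from the driver:

#include <stdio.h>

/* Hypothetical demo_* names; only the shape mirrors xgbe_hw_if. */
struct demo_dev;

struct demo_hw_ops {
	int (*init)(struct demo_dev *);
	void (*enable_tx)(struct demo_dev *);
};

struct demo_dev {
	const char *name;
	struct demo_hw_ops ops;
};

static int demo_init(struct demo_dev *d)
{
	printf("%s: init\n", d->name);
	return 0;
}

static void demo_enable_tx(struct demo_dev *d)
{
	printf("%s: tx enabled\n", d->name);
}

/* Plays the role of xgbe_init_function_ptrs_dev(): bind one backend. */
static void demo_init_function_ptrs(struct demo_hw_ops *ops)
{
	ops->init = demo_init;
	ops->enable_tx = demo_enable_tx;
}

int main(void)
{
	struct demo_dev dev = { .name = "demo0" };

	demo_init_function_ptrs(&dev.ops);
	dev.ops.init(&dev);	/* callers never name the backend directly */
	dev.ops.enable_tx(&dev);
	return 0;
}
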
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d647a7d115ac..18e2faccebb0 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -13,6 +13,7 @@
13 * Vineet Gupta 13 * Vineet Gupta
14 */ 14 */
15 15
16#include <linux/crc32.h>
16#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/io.h> 19#include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
362 return IRQ_HANDLED; 363 return IRQ_HANDLED;
363} 364}
364 365
366#ifdef CONFIG_NET_POLL_CONTROLLER
367static void arc_emac_poll_controller(struct net_device *dev)
368{
369 disable_irq(dev->irq);
370 arc_emac_intr(dev->irq, dev);
371 enable_irq(dev->irq);
372}
373#endif
374
365/** 375/**
366 * arc_emac_open - Open the network device. 376 * arc_emac_open - Open the network device.
367 * @ndev: Pointer to the network device. 377 * @ndev: Pointer to the network device.
@@ -451,6 +461,41 @@ static int arc_emac_open(struct net_device *ndev)
451} 461}
452 462
453/** 463/**
464 * arc_emac_set_rx_mode - Change the receive filtering mode.
465 * @ndev: Pointer to the network device.
466 *
467 * This function enables/disables promiscuous or all-multicast mode
468 * and updates the multicast filtering list of the network device.
469 */
470static void arc_emac_set_rx_mode(struct net_device *ndev)
471{
472 struct arc_emac_priv *priv = netdev_priv(ndev);
473
474 if (ndev->flags & IFF_PROMISC) {
475 arc_reg_or(priv, R_CTRL, PROM_MASK);
476 } else {
477 arc_reg_clr(priv, R_CTRL, PROM_MASK);
478
479 if (ndev->flags & IFF_ALLMULTI) {
480 arc_reg_set(priv, R_LAFL, ~0);
481 arc_reg_set(priv, R_LAFH, ~0);
482 } else {
483 struct netdev_hw_addr *ha;
484 unsigned int filter[2] = { 0, 0 };
485 int bit;
486
487 netdev_for_each_mc_addr(ha, ndev) {
488 bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
489 filter[bit >> 5] |= 1 << (bit & 31);
490 }
491
492 arc_reg_set(priv, R_LAFL, filter[0]);
493 arc_reg_set(priv, R_LAFH, filter[1]);
494 }
495 }
496}
497
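
arc_emac_set_rx_mode() above hashes each multicast address with ether_crc_le() and uses the top 6 bits of the CRC to pick one of the 64 bits spread across the LAFL/LAFH filter registers. A self-contained sketch of the same computation, with a local bitwise CRC-32 (reflected polynomial 0xEDB88320, init ~0, no final xor, assumed to match ether_crc_le()) standing in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Reflected CRC-32, bit-at-a-time, as ether_crc_le() computes it. */
static uint32_t crc32_le_demo(int len, const uint8_t *p)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t filter[2] = { 0, 0 };
	int bit = crc32_le_demo(ETH_ALEN, mc) >> 26; /* top 6 bits: 0..63 */

	filter[bit >> 5] |= 1u << (bit & 31); /* [0] -> LAFL, [1] -> LAFH */
	printf("bit=%d LAFL=%08x LAFH=%08x\n", bit, filter[0], filter[1]);
	return 0;
}
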
498/**
454 * arc_emac_stop - Close the network device. 499 * arc_emac_stop - Close the network device.
455 * @ndev: Pointer to the network device. 500 * @ndev: Pointer to the network device.
456 * 501 *
@@ -620,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
620 .ndo_start_xmit = arc_emac_tx, 665 .ndo_start_xmit = arc_emac_tx,
621 .ndo_set_mac_address = arc_emac_set_address, 666 .ndo_set_mac_address = arc_emac_set_address,
622 .ndo_get_stats = arc_emac_stats, 667 .ndo_get_stats = arc_emac_stats,
668 .ndo_set_rx_mode = arc_emac_set_rx_mode,
669#ifdef CONFIG_NET_POLL_CONTROLLER
670 .ndo_poll_controller = arc_emac_poll_controller,
671#endif
623}; 672};
624 673
625static int arc_emac_probe(struct platform_device *pdev) 674static int arc_emac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 17bb9ce96260..49faa97a30c3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1302 } 1302 }
1303 1303
1304 netdev->netdev_ops = &alx_netdev_ops; 1304 netdev->netdev_ops = &alx_netdev_ops;
1305 SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops); 1305 netdev->ethtool_ops = &alx_ethtool_ops;
1306 netdev->irq = pdev->irq; 1306 netdev->irq = pdev->irq;
1307 netdev->watchdog_timeo = ALX_WATCHDOG_TIME; 1307 netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
1308 1308
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 859ea844ba0f..48694c239d5c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -56,8 +56,8 @@ static int atl1c_get_settings(struct net_device *netdev,
56 else 56 else
57 ecmd->duplex = DUPLEX_HALF; 57 ecmd->duplex = DUPLEX_HALF;
58 } else { 58 } else {
59 ethtool_cmd_speed_set(ecmd, -1); 59 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
60 ecmd->duplex = -1; 60 ecmd->duplex = DUPLEX_UNKNOWN;
61 } 61 }
62 62
63 ecmd->autoneg = AUTONEG_ENABLE; 63 ecmd->autoneg = AUTONEG_ENABLE;
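
The atl1c/atl1e/atl1/atl2 hunks in this series all make the same substitution: the bare -1 becomes SPEED_UNKNOWN/DUPLEX_UNKNOWN. The bit patterns are unchanged (SPEED_UNKNOWN is ~0, and ethtool_cmd_speed_set() splits the 32-bit value across the 16-bit speed/speed_hi fields), so this is purely a readability fix. A sketch of the split, assuming only the uapi layout of the speed fields:

#include <stdint.h>
#include <stdio.h>

#define SPEED_UNKNOWN  ((uint32_t)-1)	/* 0xffffffff, as in linux/ethtool.h */
#define DUPLEX_UNKNOWN 0xff

/* Just the fields that matter here; struct ethtool_cmd splits the
 * 32-bit speed across two 16-bit halves for ABI reasons. */
struct demo_cmd {
	uint16_t speed;
	uint16_t speed_hi;
	uint8_t duplex;
};

static void demo_speed_set(struct demo_cmd *ep, uint32_t speed)
{
	ep->speed = (uint16_t)speed;
	ep->speed_hi = (uint16_t)(speed >> 16);
}

static uint32_t demo_speed(const struct demo_cmd *ep)
{
	return (uint32_t)ep->speed_hi << 16 | ep->speed;
}

int main(void)
{
	struct demo_cmd cmd;

	demo_speed_set(&cmd, SPEED_UNKNOWN);
	cmd.duplex = DUPLEX_UNKNOWN; /* same bit pattern the old -1 produced */
	printf("speed=%08x duplex=%02x\n", demo_speed(&cmd), cmd.duplex);
	return 0;
}
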
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
305 305
306void atl1c_set_ethtool_ops(struct net_device *netdev) 306void atl1c_set_ethtool_ops(struct net_device *netdev)
307{ 307{
308 SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops); 308 netdev->ethtool_ops = &atl1c_ethtool_ops;
309} 309}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 82b23861bf55..1be072f4afc2 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -57,8 +57,8 @@ static int atl1e_get_settings(struct net_device *netdev,
57 else 57 else
58 ecmd->duplex = DUPLEX_HALF; 58 ecmd->duplex = DUPLEX_HALF;
59 } else { 59 } else {
60 ethtool_cmd_speed_set(ecmd, -1); 60 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
61 ecmd->duplex = -1; 61 ecmd->duplex = DUPLEX_UNKNOWN;
62 } 62 }
63 63
64 ecmd->autoneg = AUTONEG_ENABLE; 64 ecmd->autoneg = AUTONEG_ENABLE;
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
388 388
389void atl1e_set_ethtool_ops(struct net_device *netdev) 389void atl1e_set_ethtool_ops(struct net_device *netdev)
390{ 390{
391 SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops); 391 netdev->ethtool_ops = &atl1e_ethtool_ops;
392} 392}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index dfd0e91fa726..b460db7919a2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3258,8 +3258,8 @@ static int atl1_get_settings(struct net_device *netdev,
3258 else 3258 else
3259 ecmd->duplex = DUPLEX_HALF; 3259 ecmd->duplex = DUPLEX_HALF;
3260 } else { 3260 } else {
3261 ethtool_cmd_speed_set(ecmd, -1); 3261 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
3262 ecmd->duplex = -1; 3262 ecmd->duplex = DUPLEX_UNKNOWN;
3263 } 3263 }
3264 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || 3264 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3265 hw->media_type == MEDIA_TYPE_1000M_FULL) 3265 hw->media_type == MEDIA_TYPE_1000M_FULL)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 78befb522a52..6746bd717146 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1396 atl2_setup_pcicmd(pdev); 1396 atl2_setup_pcicmd(pdev);
1397 1397
1398 netdev->netdev_ops = &atl2_netdev_ops; 1398 netdev->netdev_ops = &atl2_netdev_ops;
1399 SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops); 1399 netdev->ethtool_ops = &atl2_ethtool_ops;
1400 netdev->watchdog_timeo = 5 * HZ; 1400 netdev->watchdog_timeo = 5 * HZ;
1401 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1401 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1402 1402
@@ -1769,8 +1769,8 @@ static int atl2_get_settings(struct net_device *netdev,
1769 else 1769 else
1770 ecmd->duplex = DUPLEX_HALF; 1770 ecmd->duplex = DUPLEX_HALF;
1771 } else { 1771 } else {
1772 ethtool_cmd_speed_set(ecmd, -1); 1772 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
1773 ecmd->duplex = -1; 1773 ecmd->duplex = DUPLEX_UNKNOWN;
1774 } 1774 }
1775 1775
1776 ecmd->autoneg = AUTONEG_ENABLE; 1776 ecmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd03722..3e488094b073 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
150	 When using this driver on BCM4706 it is also required to enable	150	 When using this driver on BCM4706 it is also required to enable
151 BCMA_DRIVER_GMAC_CMN to make it work. 151 BCMA_DRIVER_GMAC_CMN to make it work.
152 152
153config SYSTEMPORT
154 tristate "Broadcom SYSTEMPORT internal MAC support"
155 depends on OF
156 select MII
157 select PHYLIB
158 select FIXED_PHY if SYSTEMPORT=y
159 help
160 This driver supports the built-in Ethernet MACs found in the
	161	 Broadcom BCM7xxx Set Top Box family of chipsets using an internal
162 Ethernet switch.
163
153endif # NET_VENDOR_BROADCOM 164endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0d4c7d..e2a958a657e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
11obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o 11obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
12obj-$(CONFIG_TIGON3) += tg3.o 12obj-$(CONFIG_TIGON3) += tg3.o
13obj-$(CONFIG_BGMAC) += bgmac.o 13obj-$(CONFIG_BGMAC) += bgmac.o
14obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 05ba62589017..ca5a20a48b14 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
2380 netif_napi_add(dev, &bp->napi, b44_poll, 64); 2380 netif_napi_add(dev, &bp->napi, b44_poll, 64);
2381 dev->watchdog_timeo = B44_TX_TIMEOUT; 2381 dev->watchdog_timeo = B44_TX_TIMEOUT;
2382 dev->irq = sdev->irq; 2382 dev->irq = sdev->irq;
2383 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); 2383 dev->ethtool_ops = &b44_ethtool_ops;
2384 2384
2385 err = ssb_bus_powerup(sdev->bus, 0); 2385 err = ssb_bus_powerup(sdev->bus, 0);
2386 if (err) { 2386 if (err) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5565d6..3e8d1a88ed3d 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1315 1315
1316}; 1316};
1317 1317
1318#define BCM_ENET_STATS_LEN \ 1318#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
1319 (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
1320 1319
1321static const u32 unused_mib_regs[] = { 1320static const u32 unused_mib_regs[] = {
1322 ETH_MIB_TX_ALL_OCTETS, 1321 ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
1898 dev->netdev_ops = &bcm_enet_ops; 1897 dev->netdev_ops = &bcm_enet_ops;
1899 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 1898 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1900 1899
1901 SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); 1900 dev->ethtool_ops = &bcm_enet_ethtool_ops;
1902 SET_NETDEV_DEV(dev, &pdev->dev); 1901 SET_NETDEV_DEV(dev, &pdev->dev);
1903 1902
1904 ret = register_netdev(dev); 1903 ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2784 /* register netdevice */ 2783 /* register netdevice */
2785 dev->netdev_ops = &bcm_enetsw_ops; 2784 dev->netdev_ops = &bcm_enetsw_ops;
2786 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); 2785 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2787 SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops); 2786 dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2788 SET_NETDEV_DEV(dev, &pdev->dev); 2787 SET_NETDEV_DEV(dev, &pdev->dev);
2789 2788
2790 spin_lock_init(&priv->enetsw_mdio_lock); 2789 spin_lock_init(&priv->enetsw_mdio_lock);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 000000000000..141160ef249a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1654 @@
1/*
2 * Broadcom BCM7xxx System Port Ethernet MAC driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/platform_device.h>
20#include <linux/of.h>
21#include <linux/of_net.h>
22#include <linux/of_mdio.h>
23#include <linux/phy.h>
24#include <linux/phy_fixed.h>
25#include <net/ip.h>
26#include <net/ipv6.h>
27
28#include "bcmsysport.h"
29
30/* I/O accessors register helpers */
31#define BCM_SYSPORT_IO_MACRO(name, offset) \
32static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
33{ \
34 u32 reg = __raw_readl(priv->base + offset + off); \
35 return reg; \
36} \
37static inline void name##_writel(struct bcm_sysport_priv *priv, \
38 u32 val, u32 off) \
39{ \
40 __raw_writel(val, priv->base + offset + off); \
41} \
42
43BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
44BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
45BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
46BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
47BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
48BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
49BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
50BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
51BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
52BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
53
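
BCM_SYSPORT_IO_MACRO above stamps out one readl/writel pair per register block via token pasting, each block at a fixed offset from priv->base. The same technique, sketched against a plain array instead of memory-mapped I/O (word indexing here; the real driver uses byte offsets and __raw_readl/__raw_writel):

#include <stdint.h>
#include <stdio.h>

struct demo_priv {
	uint32_t *base;	/* stands in for the ioremap'd register window */
};

/* Token pasting generates a name##_readl/name##_writel pair per block. */
#define DEMO_IO_MACRO(name, offset)					\
static inline uint32_t name##_readl(struct demo_priv *p, uint32_t off)	\
{									\
	return p->base[(offset) + (off)];				\
}									\
static inline void name##_writel(struct demo_priv *p,			\
				 uint32_t val, uint32_t off)		\
{									\
	p->base[(offset) + (off)] = val;				\
}

DEMO_IO_MACRO(umac, 8)	/* hypothetical block at word offset 8 */
DEMO_IO_MACRO(rbuf, 16)

int main(void)
{
	uint32_t regs[32] = { 0 };
	struct demo_priv priv = { .base = regs };

	umac_writel(&priv, 0xabcd, 0);
	printf("umac[0]=%x rbuf[0]=%x\n",
	       umac_readl(&priv, 0), rbuf_readl(&priv, 0));
	return 0;
}
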
 54/* L2-interrupt masking/unmasking helpers that automatically save the
 55 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
56 */
57#define BCM_SYSPORT_INTR_L2(which) \
58static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
59 u32 mask) \
60{ \
61 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
62 priv->irq##which##_mask &= ~(mask); \
63} \
64static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
65 u32 mask) \
66{ \
67 intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
68 priv->irq##which##_mask |= (mask); \
69} \
70
71BCM_SYSPORT_INTR_L2(0)
72BCM_SYSPORT_INTR_L2(1)
73
 74/* Accesses to GISB/RBUS registers are expensive (a few hundred
 75 * nanoseconds), so keep the 64-bit check explicit here to save
 76 * one register write per packet on 32-bit platforms.
77 */
78static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
79 void __iomem *d,
80 dma_addr_t addr)
81{
82#ifdef CONFIG_PHYS_ADDR_T_64BIT
83 __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
84 d + DESC_ADDR_HI_STATUS_LEN);
85#endif
86 __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
87}
88
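
dma_desc_set_addr() above splits the DMA address across two 32-bit writes and compiles the high half away entirely on 32-bit platforms. The arithmetic, with local stand-ins for the kernel's upper_32_bits()/lower_32_bits():

#include <stdint.h>
#include <stdio.h>

/* Local equivalents of the kernel helpers. */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))

int main(void)
{
	uint64_t addr = 0x0000000123456000ull;	/* hypothetical DMA address */
	uint32_t hi = upper_32_bits(addr);	/* written only when 64-bit */
	uint32_t lo = lower_32_bits(addr);

	printf("hi=%08x lo=%08x\n", hi, lo);
	return 0;
}
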
89static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
90 struct dma_desc *desc,
91 unsigned int port)
92{
93 /* Ports are latched, so write upper address first */
94 tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
95 tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
96}
97
98/* Ethtool operations */
99static int bcm_sysport_set_settings(struct net_device *dev,
100 struct ethtool_cmd *cmd)
101{
102 struct bcm_sysport_priv *priv = netdev_priv(dev);
103
104 if (!netif_running(dev))
105 return -EINVAL;
106
107 return phy_ethtool_sset(priv->phydev, cmd);
108}
109
110static int bcm_sysport_get_settings(struct net_device *dev,
111 struct ethtool_cmd *cmd)
112{
113 struct bcm_sysport_priv *priv = netdev_priv(dev);
114
115 if (!netif_running(dev))
116 return -EINVAL;
117
118 return phy_ethtool_gset(priv->phydev, cmd);
119}
120
121static int bcm_sysport_set_rx_csum(struct net_device *dev,
122 netdev_features_t wanted)
123{
124 struct bcm_sysport_priv *priv = netdev_priv(dev);
125 u32 reg;
126
127 priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
128 reg = rxchk_readl(priv, RXCHK_CONTROL);
129 if (priv->rx_csum_en)
130 reg |= RXCHK_EN;
131 else
132 reg &= ~RXCHK_EN;
133
 134	/* If UniMAC forwards the CRC, we need to skip over it so that
 135	 * a valid CHK bit is set in the per-packet status word
136 */
137 if (priv->rx_csum_en && priv->crc_fwd)
138 reg |= RXCHK_SKIP_FCS;
139 else
140 reg &= ~RXCHK_SKIP_FCS;
141
142 rxchk_writel(priv, reg, RXCHK_CONTROL);
143
144 return 0;
145}
146
147static int bcm_sysport_set_tx_csum(struct net_device *dev,
148 netdev_features_t wanted)
149{
150 struct bcm_sysport_priv *priv = netdev_priv(dev);
151 u32 reg;
152
153 /* Hardware transmit checksum requires us to enable the Transmit status
154 * block prepended to the packet contents
155 */
156 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
157 reg = tdma_readl(priv, TDMA_CONTROL);
158 if (priv->tsb_en)
159 reg |= TSB_EN;
160 else
161 reg &= ~TSB_EN;
162 tdma_writel(priv, reg, TDMA_CONTROL);
163
164 return 0;
165}
166
167static int bcm_sysport_set_features(struct net_device *dev,
168 netdev_features_t features)
169{
170 netdev_features_t changed = features ^ dev->features;
171 netdev_features_t wanted = dev->wanted_features;
172 int ret = 0;
173
174 if (changed & NETIF_F_RXCSUM)
175 ret = bcm_sysport_set_rx_csum(dev, wanted);
176 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
177 ret = bcm_sysport_set_tx_csum(dev, wanted);
178
179 return ret;
180}
181
182/* Hardware counters must be kept in sync because the order/offset
183 * is important here (order in structure declaration = order in hardware)
184 */
185static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
186 /* general stats */
187 STAT_NETDEV(rx_packets),
188 STAT_NETDEV(tx_packets),
189 STAT_NETDEV(rx_bytes),
190 STAT_NETDEV(tx_bytes),
191 STAT_NETDEV(rx_errors),
192 STAT_NETDEV(tx_errors),
193 STAT_NETDEV(rx_dropped),
194 STAT_NETDEV(tx_dropped),
195 STAT_NETDEV(multicast),
196 /* UniMAC RSV counters */
197 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
198 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
199 STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
200 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
201 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
202 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
203 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
204 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
205 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
206 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
207 STAT_MIB_RX("rx_pkts", mib.rx.pkt),
208 STAT_MIB_RX("rx_bytes", mib.rx.bytes),
209 STAT_MIB_RX("rx_multicast", mib.rx.mca),
210 STAT_MIB_RX("rx_broadcast", mib.rx.bca),
211 STAT_MIB_RX("rx_fcs", mib.rx.fcs),
212 STAT_MIB_RX("rx_control", mib.rx.cf),
213 STAT_MIB_RX("rx_pause", mib.rx.pf),
214 STAT_MIB_RX("rx_unknown", mib.rx.uo),
215 STAT_MIB_RX("rx_align", mib.rx.aln),
216 STAT_MIB_RX("rx_outrange", mib.rx.flr),
217 STAT_MIB_RX("rx_code", mib.rx.cde),
218 STAT_MIB_RX("rx_carrier", mib.rx.fcr),
219 STAT_MIB_RX("rx_oversize", mib.rx.ovr),
220 STAT_MIB_RX("rx_jabber", mib.rx.jbr),
221 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
222 STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
223 STAT_MIB_RX("rx_unicast", mib.rx.uc),
224 STAT_MIB_RX("rx_ppp", mib.rx.ppp),
225 STAT_MIB_RX("rx_crc", mib.rx.rcrc),
226 /* UniMAC TSV counters */
227 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
228 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
229 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
230 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
231 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
232 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
233 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
234 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
235 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
236 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
237 STAT_MIB_TX("tx_pkts", mib.tx.pkts),
238 STAT_MIB_TX("tx_multicast", mib.tx.mca),
239 STAT_MIB_TX("tx_broadcast", mib.tx.bca),
240 STAT_MIB_TX("tx_pause", mib.tx.pf),
241 STAT_MIB_TX("tx_control", mib.tx.cf),
242 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
243 STAT_MIB_TX("tx_oversize", mib.tx.ovr),
244 STAT_MIB_TX("tx_defer", mib.tx.drf),
245 STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
246 STAT_MIB_TX("tx_single_col", mib.tx.scl),
247 STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
248 STAT_MIB_TX("tx_late_col", mib.tx.lcl),
249 STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
250 STAT_MIB_TX("tx_frags", mib.tx.frg),
251 STAT_MIB_TX("tx_total_col", mib.tx.ncl),
252 STAT_MIB_TX("tx_jabber", mib.tx.jbr),
253 STAT_MIB_TX("tx_bytes", mib.tx.bytes),
254 STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
255 STAT_MIB_TX("tx_unicast", mib.tx.uc),
256 /* UniMAC RUNT counters */
257 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
258 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
259 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
260 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
261 /* RXCHK misc statistics */
262 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
263 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
264 RXCHK_OTHER_DISC_CNTR),
265 /* RBUF misc statistics */
266 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
267 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
268};
269
270#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
271
272static void bcm_sysport_get_drvinfo(struct net_device *dev,
273 struct ethtool_drvinfo *info)
274{
275 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
276 strlcpy(info->version, "0.1", sizeof(info->version));
277 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
278 info->n_stats = BCM_SYSPORT_STATS_LEN;
279}
280
281static u32 bcm_sysport_get_msglvl(struct net_device *dev)
282{
283 struct bcm_sysport_priv *priv = netdev_priv(dev);
284
285 return priv->msg_enable;
286}
287
288static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
289{
290 struct bcm_sysport_priv *priv = netdev_priv(dev);
291
292 priv->msg_enable = enable;
293}
294
295static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
296{
297 switch (string_set) {
298 case ETH_SS_STATS:
299 return BCM_SYSPORT_STATS_LEN;
300 default:
301 return -EOPNOTSUPP;
302 }
303}
304
305static void bcm_sysport_get_strings(struct net_device *dev,
306 u32 stringset, u8 *data)
307{
308 int i;
309
310 switch (stringset) {
311 case ETH_SS_STATS:
312 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
313 memcpy(data + i * ETH_GSTRING_LEN,
314 bcm_sysport_gstrings_stats[i].stat_string,
315 ETH_GSTRING_LEN);
316 }
317 break;
318 default:
319 break;
320 }
321}
322
323static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
324{
325 int i, j = 0;
326
327 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
328 const struct bcm_sysport_stats *s;
329 u8 offset = 0;
330 u32 val = 0;
331 char *p;
332
333 s = &bcm_sysport_gstrings_stats[i];
334 switch (s->type) {
335 case BCM_SYSPORT_STAT_NETDEV:
336 continue;
337 case BCM_SYSPORT_STAT_MIB_RX:
338 case BCM_SYSPORT_STAT_MIB_TX:
339 case BCM_SYSPORT_STAT_RUNT:
340 if (s->type != BCM_SYSPORT_STAT_MIB_RX)
341 offset = UMAC_MIB_STAT_OFFSET;
342 val = umac_readl(priv, UMAC_MIB_START + j + offset);
343 break;
344 case BCM_SYSPORT_STAT_RXCHK:
345 val = rxchk_readl(priv, s->reg_offset);
346 if (val == ~0)
347 rxchk_writel(priv, 0, s->reg_offset);
348 break;
349 case BCM_SYSPORT_STAT_RBUF:
350 val = rbuf_readl(priv, s->reg_offset);
351 if (val == ~0)
352 rbuf_writel(priv, 0, s->reg_offset);
353 break;
354 }
355
356 j += s->stat_sizeof;
357 p = (char *)priv + s->stat_offset;
358 *(u32 *)p = val;
359 }
360
361 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
362}
363
364static void bcm_sysport_get_stats(struct net_device *dev,
365 struct ethtool_stats *stats, u64 *data)
366{
367 struct bcm_sysport_priv *priv = netdev_priv(dev);
368 int i;
369
370 if (netif_running(dev))
371 bcm_sysport_update_mib_counters(priv);
372
373 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
374 const struct bcm_sysport_stats *s;
375 char *p;
376
377 s = &bcm_sysport_gstrings_stats[i];
378 if (s->type == BCM_SYSPORT_STAT_NETDEV)
379 p = (char *)&dev->stats;
380 else
381 p = (char *)priv;
382 p += s->stat_offset;
383 data[i] = *(u32 *)p;
384 }
385}
386
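
bcm_sysport_get_stats() is table-driven: each entry carries a name plus a byte offset into either dev->stats or the private structure, and the STAT_* macros (defined in bcmsysport.h, not shown in this diff) presumably expand to offsetof() pairs. A generic sketch of the pattern:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_mib {
	uint32_t rx_pkts;
	uint32_t tx_pkts;
};

struct demo_stat {
	const char *name;
	size_t offset;	/* byte offset into struct demo_mib */
};

#define DEMO_STAT(member) { #member, offsetof(struct demo_mib, member) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(rx_pkts),
	DEMO_STAT(tx_pkts),
};

int main(void)
{
	struct demo_mib mib = { .rx_pkts = 42, .tx_pkts = 7 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const char *p = (const char *)&mib + demo_stats[i].offset;

		printf("%s = %u\n", demo_stats[i].name, *(const uint32_t *)p);
	}
	return 0;
}
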
387static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
388{
389 dev_kfree_skb_any(cb->skb);
390 cb->skb = NULL;
391 dma_unmap_addr_set(cb, dma_addr, 0);
392}
393
394static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
395 struct bcm_sysport_cb *cb)
396{
397 struct device *kdev = &priv->pdev->dev;
398 struct net_device *ndev = priv->netdev;
399 dma_addr_t mapping;
400 int ret;
401
402 cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
403 if (!cb->skb) {
404 netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
405 return -ENOMEM;
406 }
407
408 mapping = dma_map_single(kdev, cb->skb->data,
409 RX_BUF_LENGTH, DMA_FROM_DEVICE);
410 ret = dma_mapping_error(kdev, mapping);
411 if (ret) {
412 bcm_sysport_free_cb(cb);
413 netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
414 return ret;
415 }
416
417 dma_unmap_addr_set(cb, dma_addr, mapping);
418 dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
419
420 priv->rx_bd_assign_index++;
421 priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
422 priv->rx_bd_assign_ptr = priv->rx_bds +
423 (priv->rx_bd_assign_index * DESC_SIZE);
424
425 netif_dbg(priv, rx_status, ndev, "RX refill\n");
426
427 return 0;
428}
429
430static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
431{
432 struct bcm_sysport_cb *cb;
433 int ret = 0;
434 unsigned int i;
435
436 for (i = 0; i < priv->num_rx_bds; i++) {
437 cb = &priv->rx_cbs[priv->rx_bd_assign_index];
438 if (cb->skb)
439 continue;
440
441 ret = bcm_sysport_rx_refill(priv, cb);
442 if (ret)
443 break;
444 }
445
446 return ret;
447}
448
449/* Poll the hardware for up to budget packets to process */
450static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
451 unsigned int budget)
452{
453 struct device *kdev = &priv->pdev->dev;
454 struct net_device *ndev = priv->netdev;
455 unsigned int processed = 0, to_process;
456 struct bcm_sysport_cb *cb;
457 struct sk_buff *skb;
458 unsigned int p_index;
459 u16 len, status;
460 struct bcm_rsb *rsb;
461
462 /* Determine how much we should process since last call */
463 p_index = rdma_readl(priv, RDMA_PROD_INDEX);
464 p_index &= RDMA_PROD_INDEX_MASK;
465
466 if (p_index < priv->rx_c_index)
467 to_process = (RDMA_CONS_INDEX_MASK + 1) -
468 priv->rx_c_index + p_index;
469 else
470 to_process = p_index - priv->rx_c_index;
471
472 netif_dbg(priv, rx_status, ndev,
473 "p_index=%d rx_c_index=%d to_process=%d\n",
474 p_index, priv->rx_c_index, to_process);
475
476 while ((processed < to_process) &&
477 (processed < budget)) {
478
479 cb = &priv->rx_cbs[priv->rx_read_ptr];
480 skb = cb->skb;
481 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
482 RX_BUF_LENGTH, DMA_FROM_DEVICE);
483
484 /* Extract the Receive Status Block prepended */
485 rsb = (struct bcm_rsb *)skb->data;
486 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
487 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
488 DESC_STATUS_MASK;
489
490 processed++;
491 priv->rx_read_ptr++;
492 if (priv->rx_read_ptr == priv->num_rx_bds)
493 priv->rx_read_ptr = 0;
494
495 netif_dbg(priv, rx_status, ndev,
496 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
497 p_index, priv->rx_c_index, priv->rx_read_ptr,
498 len, status);
499
500 if (unlikely(!skb)) {
501 netif_err(priv, rx_err, ndev, "out of memory!\n");
502 ndev->stats.rx_dropped++;
503 ndev->stats.rx_errors++;
504 goto refill;
505 }
506
507 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
508 netif_err(priv, rx_status, ndev, "fragmented packet!\n");
509 ndev->stats.rx_dropped++;
510 ndev->stats.rx_errors++;
511 bcm_sysport_free_cb(cb);
512 goto refill;
513 }
514
515 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
516 netif_err(priv, rx_err, ndev, "error packet\n");
517 if (status & RX_STATUS_OVFLOW)
518 ndev->stats.rx_over_errors++;
519 ndev->stats.rx_dropped++;
520 ndev->stats.rx_errors++;
521 bcm_sysport_free_cb(cb);
522 goto refill;
523 }
524
525 skb_put(skb, len);
526
527 /* Hardware validated our checksum */
528 if (likely(status & DESC_L4_CSUM))
529 skb->ip_summed = CHECKSUM_UNNECESSARY;
530
 531		/* The hardware prepends 2 bytes before the Ethernet header,
 532		 * and we also have the Receive Status Block; strip all of
 533		 * this from the SKB.
534 */
535 skb_pull(skb, sizeof(*rsb) + 2);
536 len -= (sizeof(*rsb) + 2);
537
538 /* UniMAC may forward CRC */
539 if (priv->crc_fwd) {
540 skb_trim(skb, len - ETH_FCS_LEN);
541 len -= ETH_FCS_LEN;
542 }
543
544 skb->protocol = eth_type_trans(skb, ndev);
545 ndev->stats.rx_packets++;
546 ndev->stats.rx_bytes += len;
547
548 napi_gro_receive(&priv->napi, skb);
549refill:
550 bcm_sysport_rx_refill(priv, cb);
551 }
552
553 return processed;
554}
555
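
bcm_sysport_desc_rx() computes pending work from free-running producer/consumer indices that wrap at RDMA_CONS_INDEX_MASK + 1. When the index space is a power of two, the two-branch form above collapses to a single masked subtraction; a sketch, assuming a 16-bit index space:

#include <stdio.h>

#define INDEX_MASK 0xffffu	/* assumed power-of-two index space */

/* Equivalent to the if/else in bcm_sysport_desc_rx() for a
 * power-of-two index space: modular subtraction handles wrap. */
static unsigned int pending(unsigned int prod, unsigned int cons)
{
	return (prod - cons) & INDEX_MASK;
}

int main(void)
{
	/* consumer just before the wrap point, producer just after */
	printf("%u\n", pending(0x0005, 0xfffe));	/* -> 7 */
	printf("%u\n", pending(0x0100, 0x00f0));	/* -> 16 */
	return 0;
}
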
556static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
557 struct bcm_sysport_cb *cb,
558 unsigned int *bytes_compl,
559 unsigned int *pkts_compl)
560{
561 struct device *kdev = &priv->pdev->dev;
562 struct net_device *ndev = priv->netdev;
563
564 if (cb->skb) {
565 ndev->stats.tx_bytes += cb->skb->len;
566 *bytes_compl += cb->skb->len;
567 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
568 dma_unmap_len(cb, dma_len),
569 DMA_TO_DEVICE);
570 ndev->stats.tx_packets++;
571 (*pkts_compl)++;
572 bcm_sysport_free_cb(cb);
573 /* SKB fragment */
574 } else if (dma_unmap_addr(cb, dma_addr)) {
575 ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
576 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
577 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
578 dma_unmap_addr_set(cb, dma_addr, 0);
579 }
580}
581
582/* Reclaim queued SKBs for transmission completion, lockless version */
583static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
584 struct bcm_sysport_tx_ring *ring)
585{
586 struct net_device *ndev = priv->netdev;
587 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
588 unsigned int pkts_compl = 0, bytes_compl = 0;
589 struct bcm_sysport_cb *cb;
590 struct netdev_queue *txq;
591 u32 hw_ind;
592
593 txq = netdev_get_tx_queue(ndev, ring->index);
594
595 /* Compute how many descriptors have been processed since last call */
596 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
597 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
598 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
599
600 last_c_index = ring->c_index;
601 num_tx_cbs = ring->size;
602
603 c_index &= (num_tx_cbs - 1);
604
605 if (c_index >= last_c_index)
606 last_tx_cn = c_index - last_c_index;
607 else
608 last_tx_cn = num_tx_cbs - last_c_index + c_index;
609
610 netif_dbg(priv, tx_done, ndev,
611 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
612 ring->index, c_index, last_tx_cn, last_c_index);
613
614 while (last_tx_cn-- > 0) {
615 cb = ring->cbs + last_c_index;
616 bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
617
618 ring->desc_count++;
619 last_c_index++;
620 last_c_index &= (num_tx_cbs - 1);
621 }
622
623 ring->c_index = c_index;
624
625 if (netif_tx_queue_stopped(txq) && pkts_compl)
626 netif_tx_wake_queue(txq);
627
628 netif_dbg(priv, tx_done, ndev,
629 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
630 ring->index, ring->c_index, pkts_compl, bytes_compl);
631
632 return pkts_compl;
633}
634
635/* Locked version of the per-ring TX reclaim routine */
636static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
637 struct bcm_sysport_tx_ring *ring)
638{
639 unsigned int released;
640 unsigned long flags;
641
642 spin_lock_irqsave(&ring->lock, flags);
643 released = __bcm_sysport_tx_reclaim(priv, ring);
644 spin_unlock_irqrestore(&ring->lock, flags);
645
646 return released;
647}
648
649static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
650{
651 struct bcm_sysport_tx_ring *ring =
652 container_of(napi, struct bcm_sysport_tx_ring, napi);
653 unsigned int work_done = 0;
654
655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
656
657 if (work_done < budget) {
658 napi_complete(napi);
659 /* re-enable TX interrupt */
660 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
661 }
662
663 return work_done;
664}
665
666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
667{
668 unsigned int q;
669
670 for (q = 0; q < priv->netdev->num_tx_queues; q++)
671 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
672}
673
674static int bcm_sysport_poll(struct napi_struct *napi, int budget)
675{
676 struct bcm_sysport_priv *priv =
677 container_of(napi, struct bcm_sysport_priv, napi);
678 unsigned int work_done = 0;
679
680 work_done = bcm_sysport_desc_rx(priv, budget);
681
682 priv->rx_c_index += work_done;
683 priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
684 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
685
686 if (work_done < budget) {
687 napi_complete(napi);
688 /* re-enable RX interrupts */
689 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
690 }
691
692 return work_done;
693}
694
695
696/* RX and misc interrupt routine */
697static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
698{
699 struct net_device *dev = dev_id;
700 struct bcm_sysport_priv *priv = netdev_priv(dev);
701
702 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
703 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
704 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
705
706 if (unlikely(priv->irq0_stat == 0)) {
707 netdev_warn(priv->netdev, "spurious RX interrupt\n");
708 return IRQ_NONE;
709 }
710
711 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
712 if (likely(napi_schedule_prep(&priv->napi))) {
713 /* disable RX interrupts */
714 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
715 __napi_schedule(&priv->napi);
716 }
717 }
718
 719	/* TX ring is full; perform a full reclaim since we do not know
720 * which one would trigger this interrupt
721 */
722 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
723 bcm_sysport_tx_reclaim_all(priv);
724
725 return IRQ_HANDLED;
726}
727
728/* TX interrupt service routine */
729static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
730{
731 struct net_device *dev = dev_id;
732 struct bcm_sysport_priv *priv = netdev_priv(dev);
733 struct bcm_sysport_tx_ring *txr;
734 unsigned int ring;
735
736 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
737 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
738 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
739
740 if (unlikely(priv->irq1_stat == 0)) {
741 netdev_warn(priv->netdev, "spurious TX interrupt\n");
742 return IRQ_NONE;
743 }
744
745 for (ring = 0; ring < dev->num_tx_queues; ring++) {
746 if (!(priv->irq1_stat & BIT(ring)))
747 continue;
748
749 txr = &priv->tx_rings[ring];
750
751 if (likely(napi_schedule_prep(&txr->napi))) {
752 intrl2_1_mask_set(priv, BIT(ring));
753 __napi_schedule(&txr->napi);
754 }
755 }
756
757 return IRQ_HANDLED;
758}
759
760static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
761{
762 struct sk_buff *nskb;
763 struct bcm_tsb *tsb;
764 u32 csum_info;
765 u8 ip_proto;
766 u16 csum_start;
767 u16 ip_ver;
768
769 /* Re-allocate SKB if needed */
770 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
771 nskb = skb_realloc_headroom(skb, sizeof(*tsb));
772 dev_kfree_skb(skb);
773 if (!nskb) {
774 dev->stats.tx_errors++;
775 dev->stats.tx_dropped++;
776 return -ENOMEM;
777 }
778 skb = nskb;
779 }
780
781 tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
782 /* Zero-out TSB by default */
783 memset(tsb, 0, sizeof(*tsb));
784
785 if (skb->ip_summed == CHECKSUM_PARTIAL) {
786 ip_ver = htons(skb->protocol);
787 switch (ip_ver) {
788 case ETH_P_IP:
789 ip_proto = ip_hdr(skb)->protocol;
790 break;
791 case ETH_P_IPV6:
792 ip_proto = ipv6_hdr(skb)->nexthdr;
793 break;
794 default:
795 return 0;
796 }
797
798 /* Get the checksum offset and the L4 (transport) offset */
799 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
800 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
801 csum_info |= (csum_start << L4_PTR_SHIFT);
802
803 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
804 csum_info |= L4_LENGTH_VALID;
805 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
806 csum_info |= L4_UDP;
807 } else
808 csum_info = 0;
809
810 tsb->l4_ptr_dest_map = csum_info;
811 }
812
813 return 0;
814}
815
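
bcm_sysport_insert_tsb() packs the checksum start and result offsets into one 32-bit word using the L4_* shift/mask constants from bcmsysport.h. A sketch of the pack/unpack arithmetic with hypothetical field widths (the real mask and shift values are not visible in this diff):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low 16 bits = where to store the checksum,
 * high bits = where checksumming starts. The driver's actual
 * L4_CSUM_PTR_MASK / L4_PTR_SHIFT live in bcmsysport.h. */
#define DEMO_CSUM_PTR_MASK 0xffffu
#define DEMO_PTR_SHIFT     16

static uint32_t pack(uint16_t csum_start, uint16_t csum_offset)
{
	uint32_t v = (uint32_t)(csum_start + csum_offset) & DEMO_CSUM_PTR_MASK;

	return v | ((uint32_t)csum_start << DEMO_PTR_SHIFT);
}

int main(void)
{
	uint32_t w = pack(34, 16); /* e.g. L4 header at 34, csum field at +16 */

	printf("store at %u, start at %u\n",
	       w & DEMO_CSUM_PTR_MASK, w >> DEMO_PTR_SHIFT);
	return 0;
}
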
816static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
817 struct net_device *dev)
818{
819 struct bcm_sysport_priv *priv = netdev_priv(dev);
820 struct device *kdev = &priv->pdev->dev;
821 struct bcm_sysport_tx_ring *ring;
822 struct bcm_sysport_cb *cb;
823 struct netdev_queue *txq;
824 struct dma_desc *desc;
825 unsigned int skb_len;
826 unsigned long flags;
827 dma_addr_t mapping;
828 u32 len_status;
829 u16 queue;
830 int ret;
831
832 queue = skb_get_queue_mapping(skb);
833 txq = netdev_get_tx_queue(dev, queue);
834 ring = &priv->tx_rings[queue];
835
836 /* lock against tx reclaim in BH context and TX ring full interrupt */
837 spin_lock_irqsave(&ring->lock, flags);
838 if (unlikely(ring->desc_count == 0)) {
839 netif_tx_stop_queue(txq);
840 netdev_err(dev, "queue %d awake and ring full!\n", queue);
841 ret = NETDEV_TX_BUSY;
842 goto out;
843 }
844
845 /* Insert TSB and checksum infos */
846 if (priv->tsb_en) {
847 ret = bcm_sysport_insert_tsb(skb, dev);
848 if (ret) {
849 ret = NETDEV_TX_OK;
850 goto out;
851 }
852 }
853
854 /* The Ethernet switch we are interfaced with needs packets to be at
855 * least 64 bytes (including FCS) otherwise they will be discarded when
856 * they enter the switch port logic. When Broadcom tags are enabled, we
857 * need to make sure that packets are at least 68 bytes
858 * (including FCS and tag) because the length verification is done after
859 * the Broadcom tag is stripped off the ingress packet.
860 */
861 if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
862 ret = NETDEV_TX_OK;
863 goto out;
864 }
865
866 skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
867 ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
868
869 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
870 if (dma_mapping_error(kdev, mapping)) {
871 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
872 skb->data, skb_len);
873 ret = NETDEV_TX_OK;
874 goto out;
875 }
876
877 /* Remember the SKB for future freeing */
878 cb = &ring->cbs[ring->curr_desc];
879 cb->skb = skb;
880 dma_unmap_addr_set(cb, dma_addr, mapping);
881 dma_unmap_len_set(cb, dma_len, skb_len);
882
883 /* Fetch a descriptor entry from our pool */
884 desc = ring->desc_cpu;
885
886 desc->addr_lo = lower_32_bits(mapping);
887 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
888 len_status |= (skb_len << DESC_LEN_SHIFT);
889 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
890 DESC_STATUS_SHIFT;
891 if (skb->ip_summed == CHECKSUM_PARTIAL)
892 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
893
894 ring->curr_desc++;
895 if (ring->curr_desc == ring->size)
896 ring->curr_desc = 0;
897 ring->desc_count--;
898
899 /* Ensure write completion of the descriptor status/length
900 * in DRAM before the System Port WRITE_PORT register latches
901 * the value
902 */
903 wmb();
904 desc->addr_status_len = len_status;
905 wmb();
906
907 /* Write this descriptor address to the RING write port */
908 tdma_port_write_desc_addr(priv, desc, ring->index);
909
910 /* Check ring space and update SW control flow */
911 if (ring->desc_count == 0)
912 netif_tx_stop_queue(txq);
913
914 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
915 ring->index, ring->desc_count, ring->curr_desc);
916
917 ret = NETDEV_TX_OK;
918out:
919 spin_unlock_irqrestore(&ring->lock, flags);
920 return ret;
921}
922
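
The wmb() pair in bcm_sysport_xmit() implements a publish pattern: descriptor payload first, then the status/length word that arms the descriptor, then the doorbell, each ordered against the next. A portable analogue using C11 release fences (the driver needs wmb() proper because the consumer is a device, not another CPU):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_desc {
	uint32_t addr_lo;
	uint32_t addr_status_len;	/* writing this publishes the descriptor */
};

static void publish(struct demo_desc *d, uint32_t lo, uint32_t len_status)
{
	d->addr_lo = lo;
	/* Payload must be visible before the arming word (cf. first wmb()). */
	atomic_thread_fence(memory_order_release);
	d->addr_status_len = len_status;
	atomic_thread_fence(memory_order_release);
	/* ...and only then ring the doorbell / write port register. */
}

int main(void)
{
	struct demo_desc d = { 0 };

	publish(&d, 0x1000, 0x40 << 16);
	printf("%08x %08x\n", d.addr_lo, d.addr_status_len);
	return 0;
}
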
923static void bcm_sysport_tx_timeout(struct net_device *dev)
924{
925 netdev_warn(dev, "transmit timeout!\n");
926
927 dev->trans_start = jiffies;
928 dev->stats.tx_errors++;
929
930 netif_tx_wake_all_queues(dev);
931}
932
933/* phylib adjust link callback */
934static void bcm_sysport_adj_link(struct net_device *dev)
935{
936 struct bcm_sysport_priv *priv = netdev_priv(dev);
937 struct phy_device *phydev = priv->phydev;
938 unsigned int changed = 0;
939 u32 cmd_bits = 0, reg;
940
941 if (priv->old_link != phydev->link) {
942 changed = 1;
943 priv->old_link = phydev->link;
944 }
945
946 if (priv->old_duplex != phydev->duplex) {
947 changed = 1;
948 priv->old_duplex = phydev->duplex;
949 }
950
951 switch (phydev->speed) {
952 case SPEED_2500:
953 cmd_bits = CMD_SPEED_2500;
954 break;
955 case SPEED_1000:
956 cmd_bits = CMD_SPEED_1000;
957 break;
958 case SPEED_100:
959 cmd_bits = CMD_SPEED_100;
960 break;
961 case SPEED_10:
962 cmd_bits = CMD_SPEED_10;
963 break;
964 default:
965 break;
966 }
967 cmd_bits <<= CMD_SPEED_SHIFT;
968
969 if (phydev->duplex == DUPLEX_HALF)
970 cmd_bits |= CMD_HD_EN;
971
972 if (priv->old_pause != phydev->pause) {
973 changed = 1;
974 priv->old_pause = phydev->pause;
975 }
976
977 if (!phydev->pause)
978 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
979
980 if (changed) {
981 reg = umac_readl(priv, UMAC_CMD);
982 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
983 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
984 CMD_TX_PAUSE_IGNORE);
985 reg |= cmd_bits;
986 umac_writel(priv, reg, UMAC_CMD);
987
988 phy_print_status(priv->phydev);
989 }
990}
991
992static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
993 unsigned int index)
994{
995 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
996 struct device *kdev = &priv->pdev->dev;
997 size_t size;
998 void *p;
999 u32 reg;
1000
 1001	/* Simple descriptor partitioning for now */
1002 size = 256;
1003
1004 /* We just need one DMA descriptor which is DMA-able, since writing to
1005 * the port will allocate a new descriptor in its internal linked-list
1006 */
1007 p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
1008 if (!p) {
1009 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1010 return -ENOMEM;
1011 }
1012
1013 ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
1014 if (!ring->cbs) {
1015 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1016 return -ENOMEM;
1017 }
1018
1019 /* Initialize SW view of the ring */
1020 spin_lock_init(&ring->lock);
1021 ring->priv = priv;
1022 netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1023 ring->index = index;
1024 ring->size = size;
1025 ring->alloc_size = ring->size;
1026 ring->desc_cpu = p;
1027 ring->desc_count = ring->size;
1028 ring->curr_desc = 0;
1029
1030 /* Initialize HW ring */
1031 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1032 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1033 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1034 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1035 tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
1036 tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
1037
1038 /* Program the number of descriptors as MAX_THRESHOLD and half of
1039 * its size for the hysteresis trigger
1040 */
1041 tdma_writel(priv, ring->size |
1042 1 << RING_HYST_THRESH_SHIFT,
1043 TDMA_DESC_RING_MAX_HYST(index));
1044
1045 /* Enable the ring queue in the arbiter */
1046 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1047 reg |= (1 << index);
1048 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1049
1050 napi_enable(&ring->napi);
1051
1052 netif_dbg(priv, hw, priv->netdev,
1053 "TDMA cfg, size=%d, desc_cpu=%p\n",
1054 ring->size, ring->desc_cpu);
1055
1056 return 0;
1057}
1058
1059static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1060 unsigned int index)
1061{
1062 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1063 struct device *kdev = &priv->pdev->dev;
1064 u32 reg;
1065
1066 /* Caller should stop the TDMA engine */
1067 reg = tdma_readl(priv, TDMA_STATUS);
1068 if (!(reg & TDMA_DISABLED))
1069 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1070
1071 napi_disable(&ring->napi);
1072 netif_napi_del(&ring->napi);
1073
1074 bcm_sysport_tx_reclaim(priv, ring);
1075
1076 kfree(ring->cbs);
1077 ring->cbs = NULL;
1078
1079 if (ring->desc_dma) {
1080 dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
1081 ring->desc_dma = 0;
1082 }
1083 ring->size = 0;
1084 ring->alloc_size = 0;
1085
1086 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1087}
1088
1089/* RDMA helper */
1090static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1091 unsigned int enable)
1092{
1093 unsigned int timeout = 1000;
1094 u32 reg;
1095
1096 reg = rdma_readl(priv, RDMA_CONTROL);
1097 if (enable)
1098 reg |= RDMA_EN;
1099 else
1100 reg &= ~RDMA_EN;
1101 rdma_writel(priv, reg, RDMA_CONTROL);
1102
 1103	/* Poll for RDMA disabling completion */
1104 do {
1105 reg = rdma_readl(priv, RDMA_STATUS);
1106 if (!!(reg & RDMA_DISABLED) == !enable)
1107 return 0;
1108 usleep_range(1000, 2000);
1109 } while (timeout-- > 0);
1110
1111 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1112
1113 return -ETIMEDOUT;
1114}
1115
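
rdma_enable_set() and tdma_enable_set() share a poll-until-state-or-timeout shape (later kernels wrap this in helpers such as readx_poll_timeout()). A generic sketch with the hardware read replaced by a stub that flips state after a few polls:

#include <stdio.h>

#define DEMO_DISABLED 0x1

/* Stub "register": reports DISABLED after a few reads. */
static unsigned int demo_status_reads;

static unsigned int demo_read_status(void)
{
	return ++demo_status_reads >= 3 ? DEMO_DISABLED : 0;
}

/* Generic poll loop mirroring the rdma/tdma_enable_set() structure:
 * succeed as soon as the state matches, give up after 'tries'. */
static int poll_disabled(unsigned int want_disabled, unsigned int tries)
{
	while (tries-- > 0) {
		unsigned int reg = demo_read_status();

		if (!!(reg & DEMO_DISABLED) == want_disabled)
			return 0;
		/* the driver sleeps here: usleep_range(1000, 2000) */
	}
	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("result=%d after %u reads\n",
	       poll_disabled(1, 1000), demo_status_reads);
	return 0;
}
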
1116/* TDMA helper */
1117static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1118 unsigned int enable)
1119{
1120 unsigned int timeout = 1000;
1121 u32 reg;
1122
1123 reg = tdma_readl(priv, TDMA_CONTROL);
1124 if (enable)
1125 reg |= TDMA_EN;
1126 else
1127 reg &= ~TDMA_EN;
1128 tdma_writel(priv, reg, TDMA_CONTROL);
1129
 1130	/* Poll for TDMA disabling completion */
1131 do {
1132 reg = tdma_readl(priv, TDMA_STATUS);
1133 if (!!(reg & TDMA_DISABLED) == !enable)
1134 return 0;
1135
1136 usleep_range(1000, 2000);
1137 } while (timeout-- > 0);
1138
1139 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1140
1141 return -ETIMEDOUT;
1142}
1143
1144static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1145{
1146 u32 reg;
1147 int ret;
1148
1149 /* Initialize SW view of the RX ring */
1150 priv->num_rx_bds = NUM_RX_DESC;
1151 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1152 priv->rx_bd_assign_ptr = priv->rx_bds;
1153 priv->rx_bd_assign_index = 0;
1154 priv->rx_c_index = 0;
1155 priv->rx_read_ptr = 0;
1156 priv->rx_cbs = kzalloc(priv->num_rx_bds *
1157 sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1158 if (!priv->rx_cbs) {
1159 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1160 return -ENOMEM;
1161 }
1162
1163 ret = bcm_sysport_alloc_rx_bufs(priv);
1164 if (ret) {
1165 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1166 return ret;
1167 }
1168
1169 /* Initialize HW, ensure RDMA is disabled */
1170 reg = rdma_readl(priv, RDMA_STATUS);
1171 if (!(reg & RDMA_DISABLED))
1172 rdma_enable_set(priv, 0);
1173
1174 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1175 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1176 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1177 rdma_writel(priv, 0, RDMA_CONS_INDEX);
1178 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1179 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1180 /* Operate the queue in ring mode */
1181 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1182 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1183 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1184 rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
1185
1186 rdma_writel(priv, 1, RDMA_MBDONE_INTR);
1187
1188 netif_dbg(priv, hw, priv->netdev,
1189 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1190 priv->num_rx_bds, priv->rx_bds);
1191
1192 return 0;
1193}
1194
1195static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1196{
1197 struct bcm_sysport_cb *cb;
1198 unsigned int i;
1199 u32 reg;
1200
1201 /* Caller should ensure RDMA is disabled */
1202 reg = rdma_readl(priv, RDMA_STATUS);
1203 if (!(reg & RDMA_DISABLED))
1204 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1205
1206 for (i = 0; i < priv->num_rx_bds; i++) {
1207 cb = &priv->rx_cbs[i];
1208 if (dma_unmap_addr(cb, dma_addr))
1209 dma_unmap_single(&priv->pdev->dev,
1210 dma_unmap_addr(cb, dma_addr),
1211 RX_BUF_LENGTH, DMA_FROM_DEVICE);
1212 bcm_sysport_free_cb(cb);
1213 }
1214
1215 kfree(priv->rx_cbs);
1216 priv->rx_cbs = NULL;
1217
1218 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1219}
1220
1221static void bcm_sysport_set_rx_mode(struct net_device *dev)
1222{
1223 struct bcm_sysport_priv *priv = netdev_priv(dev);
1224 u32 reg;
1225
1226 reg = umac_readl(priv, UMAC_CMD);
1227 if (dev->flags & IFF_PROMISC)
1228 reg |= CMD_PROMISC;
1229 else
1230 reg &= ~CMD_PROMISC;
1231 umac_writel(priv, reg, UMAC_CMD);
1232
1233 /* No support for ALLMULTI */
1234 if (dev->flags & IFF_ALLMULTI)
1235 return;
1236}
1237
1238static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1239 unsigned int enable)
1240{
1241 u32 reg;
1242
1243 reg = umac_readl(priv, UMAC_CMD);
1244 if (enable)
1245 reg |= CMD_RX_EN | CMD_TX_EN;
1246 else
1247 reg &= ~(CMD_RX_EN | CMD_TX_EN);
1248 umac_writel(priv, reg, UMAC_CMD);
1249
 1250	/* UniMAC stops on a packet boundary; wait for a full-sized packet
1251 * to be processed (1 msec).
1252 */
1253 if (enable == 0)
1254 usleep_range(1000, 2000);
1255}
1256
1257static inline int umac_reset(struct bcm_sysport_priv *priv)
1258{
1259 unsigned int timeout = 0;
1260 u32 reg;
1261 int ret = 0;
1262
1263 umac_writel(priv, 0, UMAC_CMD);
1264 while (timeout++ < 1000) {
1265 reg = umac_readl(priv, UMAC_CMD);
1266 if (!(reg & CMD_SW_RESET))
1267 break;
1268
1269 udelay(1);
1270 }
1271
1272 if (reg & CMD_SW_RESET) {
1273 dev_err(&priv->pdev->dev,
1274 "timeout waiting for MAC to come out of reset\n");
1275 ret = -ETIMEDOUT;
1276 }
1277
1278 return ret;
1279}
1280
1281static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1282 unsigned char *addr)
1283{
1284 umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
1285 (addr[2] << 8) | addr[3], UMAC_MAC0);
1286 umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
1287}
1288
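A worked example of the packing in umac_set_hw_addr(), using an illustrative address aa:bb:cc:dd:ee:ff: UMAC_MAC0 receives 0xaabbccdd (the first four octets, most significant byte first) and UMAC_MAC1 receives 0x0000eeff (the last two octets).
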
1289static void topctrl_flush(struct bcm_sysport_priv *priv)
1290{
1291 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1292 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1293 mdelay(1);
1294 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1295 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1296}
1297
1298static int bcm_sysport_open(struct net_device *dev)
1299{
1300 struct bcm_sysport_priv *priv = netdev_priv(dev);
1301 unsigned int i;
1302 u32 reg;
1303 int ret;
1304
1305 /* Reset UniMAC */
1306 ret = umac_reset(priv);
1307 if (ret) {
1308 netdev_err(dev, "UniMAC reset failed\n");
1309 return ret;
1310 }
1311
1312 /* Flush TX and RX FIFOs at TOPCTRL level */
1313 topctrl_flush(priv);
1314
1315 /* Disable the UniMAC RX/TX */
1316 umac_enable_set(priv, 0);
1317
1318 /* Enable RBUF 2-byte alignment and Receive Status Block */
1319 reg = rbuf_readl(priv, RBUF_CONTROL);
1320 reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1321 rbuf_writel(priv, reg, RBUF_CONTROL);
1322
1323 /* Set maximum frame length */
1324 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1325
1326 /* Set MAC address */
1327 umac_set_hw_addr(priv, dev->dev_addr);
1328
1329 /* Read CRC forward */
1330 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1331
1332 priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1333 0, priv->phy_interface);
1334 if (!priv->phydev) {
1335 netdev_err(dev, "could not attach to PHY\n");
1336 return -ENODEV;
1337 }
1338
1339 /* Reset housekeeping link status */
1340 priv->old_duplex = -1;
1341 priv->old_link = -1;
1342 priv->old_pause = -1;
1343
1344 /* mask all interrupts and request them */
1345 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1346 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1347 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1348 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1349 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1350 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1351
1352 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
1353 if (ret) {
1354 netdev_err(dev, "failed to request RX interrupt\n");
1355 goto out_phy_disconnect;
1356 }
1357
1358 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
1359 if (ret) {
1360 netdev_err(dev, "failed to request TX interrupt\n");
1361 goto out_free_irq0;
1362 }
1363
1364 /* Initialize both hardware and software rings */
1365 for (i = 0; i < dev->num_tx_queues; i++) {
1366 ret = bcm_sysport_init_tx_ring(priv, i);
1367 if (ret) {
1368 netdev_err(dev, "failed to initialize TX ring %d\n",
1369 i);
1370 goto out_free_tx_ring;
1371 }
1372 }
1373
1374 /* Initialize linked-list */
1375 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
1376
1377 /* Initialize RX ring */
1378 ret = bcm_sysport_init_rx_ring(priv);
1379 if (ret) {
1380 netdev_err(dev, "failed to initialize RX ring\n");
1381 goto out_free_rx_ring;
1382 }
1383
1384 /* Turn on RDMA */
1385 ret = rdma_enable_set(priv, 1);
1386 if (ret)
1387 goto out_free_rx_ring;
1388
1389 /* Enable RX interrupt and TX ring full interrupt */
1390 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1391
1392 /* Turn on TDMA */
1393 ret = tdma_enable_set(priv, 1);
1394 if (ret)
1395 goto out_clear_rx_int;
1396
1397 /* Enable NAPI */
1398 napi_enable(&priv->napi);
1399
1400 /* Turn on UniMAC TX/RX */
1401 umac_enable_set(priv, 1);
1402
1403 phy_start(priv->phydev);
1404
1405 /* Enable TX interrupts for the 32 TXQs */
1406 intrl2_1_mask_clear(priv, 0xffffffff);
1407
1408 /* Last call before we start the real business */
1409 netif_tx_start_all_queues(dev);
1410
1411 return 0;
1412
1413out_clear_rx_int:
1414 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1415out_free_rx_ring:
1416 bcm_sysport_fini_rx_ring(priv);
1417out_free_tx_ring:
1418 for (i = 0; i < dev->num_tx_queues; i++)
1419 bcm_sysport_fini_tx_ring(priv, i);
1420 free_irq(priv->irq1, dev);
1421out_free_irq0:
1422 free_irq(priv->irq0, dev);
1423out_phy_disconnect:
1424 phy_disconnect(priv->phydev);
1425 return ret;
1426}
1427
1428static int bcm_sysport_stop(struct net_device *dev)
1429{
1430 struct bcm_sysport_priv *priv = netdev_priv(dev);
1431 unsigned int i;
1432 u32 reg;
1433 int ret;
1434
1435 /* stop all software from updating hardware */
1436 netif_tx_stop_all_queues(dev);
1437 napi_disable(&priv->napi);
1438 phy_stop(priv->phydev);
1439
1440 /* mask all interrupts */
1441 intrl2_0_mask_set(priv, 0xffffffff);
1442 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1443 intrl2_1_mask_set(priv, 0xffffffff);
1444 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1445
1446 /* Disable UniMAC RX */
1447 reg = umac_readl(priv, UMAC_CMD);
1448 reg &= ~CMD_RX_EN;
1449 umac_writel(priv, reg, UMAC_CMD);
1450
1451 ret = tdma_enable_set(priv, 0);
1452 if (ret) {
1453 netdev_err(dev, "timeout disabling TDMA\n");
1454 return ret;
1455 }
1456
1457 /* Wait for a maximum packet size to be drained */
1458 usleep_range(2000, 3000);
1459
1460 ret = rdma_enable_set(priv, 0);
1461 if (ret) {
1462 netdev_err(dev, "timeout disabling RDMA\n");
1463 return ret;
1464 }
1465
1466 /* Disable UniMAC TX */
1467 reg = umac_readl(priv, UMAC_CMD);
1468 reg &= ~CMD_TX_EN;
1469 umac_writel(priv, reg, UMAC_CMD);
1470
1471 /* Free RX/TX rings SW structures */
1472 for (i = 0; i < dev->num_tx_queues; i++)
1473 bcm_sysport_fini_tx_ring(priv, i);
1474 bcm_sysport_fini_rx_ring(priv);
1475
1476 free_irq(priv->irq0, dev);
1477 free_irq(priv->irq1, dev);
1478
1479 /* Disconnect from PHY */
1480 phy_disconnect(priv->phydev);
1481
1482 return 0;
1483}
1484
1485static const struct ethtool_ops bcm_sysport_ethtool_ops = {
1486 .get_settings = bcm_sysport_get_settings,
1487 .set_settings = bcm_sysport_set_settings,
1488 .get_drvinfo = bcm_sysport_get_drvinfo,
1489 .get_msglevel = bcm_sysport_get_msglvl,
1490 .set_msglevel = bcm_sysport_set_msglvl,
1491 .get_link = ethtool_op_get_link,
1492 .get_strings = bcm_sysport_get_strings,
1493 .get_ethtool_stats = bcm_sysport_get_stats,
1494 .get_sset_count = bcm_sysport_get_sset_count,
1495};
1496
1497static const struct net_device_ops bcm_sysport_netdev_ops = {
1498 .ndo_start_xmit = bcm_sysport_xmit,
1499 .ndo_tx_timeout = bcm_sysport_tx_timeout,
1500 .ndo_open = bcm_sysport_open,
1501 .ndo_stop = bcm_sysport_stop,
1502 .ndo_set_features = bcm_sysport_set_features,
1503 .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
1504};
1505
1506#define REV_FMT "v%2x.%02x"
1507
1508static int bcm_sysport_probe(struct platform_device *pdev)
1509{
1510 struct bcm_sysport_priv *priv;
1511 struct device_node *dn;
1512 struct net_device *dev;
1513 const void *macaddr;
1514 struct resource *r;
1515 u32 txq, rxq;
1516 int ret;
1517
1518 dn = pdev->dev.of_node;
1519 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1520
1521 /* Read the Transmit/Receive Queue properties */
1522 if (of_property_read_u32(dn, "systemport,num-txq", &txq))
1523 txq = TDMA_NUM_RINGS;
1524 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
1525 rxq = 1;
1526
1527 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
1528 if (!dev)
1529 return -ENOMEM;
1530
1531 /* Initialize private members */
1532 priv = netdev_priv(dev);
1533
1534 priv->irq0 = platform_get_irq(pdev, 0);
1535 priv->irq1 = platform_get_irq(pdev, 1);
1536 if (priv->irq0 <= 0 || priv->irq1 <= 0) {
1537 dev_err(&pdev->dev, "invalid interrupts\n");
1538 ret = -EINVAL;
1539 goto err;
1540 }
1541
1542 priv->base = devm_ioremap_resource(&pdev->dev, r);
1543 if (IS_ERR(priv->base)) {
1544 ret = PTR_ERR(priv->base);
1545 goto err;
1546 }
1547
1548 priv->netdev = dev;
1549 priv->pdev = pdev;
1550
1551 priv->phy_interface = of_get_phy_mode(dn);
1552 /* Default to GMII interface mode */
1553 if (priv->phy_interface < 0)
1554 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
1555
1556 /* In the case of a fixed PHY, the DT node associated
1557 * to the PHY is the Ethernet MAC DT node.
1558 */
1559 if (of_phy_is_fixed_link(dn)) {
1560 ret = of_phy_register_fixed_link(dn);
1561 if (ret) {
1562 dev_err(&pdev->dev, "failed to register fixed PHY\n");
1563 goto err;
1564 }
1565
1566 priv->phy_dn = dn;
1567 }
1568
1569 /* Initialize netdevice members */
1570 macaddr = of_get_mac_address(dn);
1571 if (!macaddr || !is_valid_ether_addr(macaddr)) {
1572 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
1573 random_ether_addr(dev->dev_addr);
1574 } else {
1575 ether_addr_copy(dev->dev_addr, macaddr);
1576 }
1577
1578 SET_NETDEV_DEV(dev, &pdev->dev);
1579 dev_set_drvdata(&pdev->dev, dev);
1580 dev->ethtool_ops = &bcm_sysport_ethtool_ops;
1581 dev->netdev_ops = &bcm_sysport_netdev_ops;
1582 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
1583
1584 /* HW supported features, none enabled by default */
1585 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
1586 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1587
1588 /* Set the needed headroom once and for all */
1589 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
1590 dev->needed_headroom += sizeof(struct bcm_tsb);
1591
1592 /* We are interfaced to a switch which handles the multicast
1593 * filtering for us, so we do not support programming any
1594 * multicast hash table in this Ethernet MAC.
1595 */
1596 dev->flags &= ~IFF_MULTICAST;
1597
1598 /* libphy will adjust the link state accordingly */
1599 netif_carrier_off(dev);
1600
1601 ret = register_netdev(dev);
1602 if (ret) {
1603 dev_err(&pdev->dev, "failed to register net_device\n");
1604 goto err;
1605 }
1606
1607 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
1608 dev_info(&pdev->dev,
1609 "Broadcom SYSTEMPORT" REV_FMT
1610 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
1611 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
1612 priv->base, priv->irq0, priv->irq1, txq, rxq);
1613
1614 return 0;
1615err:
1616 free_netdev(dev);
1617 return ret;
1618}
1619
1620static int bcm_sysport_remove(struct platform_device *pdev)
1621{
1622 struct net_device *dev = dev_get_drvdata(&pdev->dev);
1623
1624 /* Not much to do, ndo_close has been called
1625 * and we use managed allocations
1626 */
1627 unregister_netdev(dev);
1628 free_netdev(dev);
1629 dev_set_drvdata(&pdev->dev, NULL);
1630
1631 return 0;
1632}
1633
1634static const struct of_device_id bcm_sysport_of_match[] = {
1635 { .compatible = "brcm,systemport-v1.00" },
1636 { .compatible = "brcm,systemport" },
1637 { /* sentinel */ }
1638};
1639
1640static struct platform_driver bcm_sysport_driver = {
1641 .probe = bcm_sysport_probe,
1642 .remove = bcm_sysport_remove,
1643 .driver = {
1644 .name = "brcm-systemport",
1645 .owner = THIS_MODULE,
1646 .of_match_table = bcm_sysport_of_match,
1647 },
1648};
1649module_platform_driver(bcm_sysport_driver);
1650
1651MODULE_AUTHOR("Broadcom Corporation");
1652MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
1653MODULE_ALIAS("platform:brcm-systemport");
1654MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 000000000000..281c08246037
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,678 @@
1/*
2 * Broadcom BCM7xxx System Port Ethernet MAC driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __BCM_SYSPORT_H
12#define __BCM_SYSPORT_H
13
14#include <linux/if_vlan.h>
15
16/* Receive/transmit descriptor format */
17#define DESC_ADDR_HI_STATUS_LEN 0x00
18#define DESC_ADDR_HI_SHIFT 0
19#define DESC_ADDR_HI_MASK 0xff
20#define DESC_STATUS_SHIFT 8
21#define DESC_STATUS_MASK 0x3ff
22#define DESC_LEN_SHIFT 18
23#define DESC_LEN_MASK 0x7fff
24#define DESC_ADDR_LO 0x04
25
26/* HW supports 40-bit addressing, hence the two 32-bit words per descriptor */
27#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
28
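To make the shift/mask definitions above concrete, a hedged sketch of how the first descriptor word packs the upper 8 address bits, the 10-bit status and the length — the helper is hypothetical, not part of the driver, and assumes the 40-bit DMA address is split across the two words of struct dma_desc defined later in this file:

static inline u32 bcm_sysport_pack_word0(dma_addr_t addr, u32 status, u32 len)
{
	u32 w;

	/* upper 8 bits of the 40-bit bus address live in bits 7:0 */
	w = (upper_32_bits(addr) & DESC_ADDR_HI_MASK) << DESC_ADDR_HI_SHIFT;
	w |= (status & DESC_STATUS_MASK) << DESC_STATUS_SHIFT;
	/* a full 15-bit length would spill past bit 31; real buffers are <= 2 KiB */
	w |= (len & DESC_LEN_MASK) << DESC_LEN_SHIFT;
	return w;
}
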
29/* Default RX buffer allocation size */
30#define RX_BUF_LENGTH 2048
31
32/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526;
33 * adding ENET_PAD (10) rounds this up to 1536, a multiple of 256 bytes
34 */
35#define ENET_BRCM_TAG_LEN 4
36#define ENET_PAD 10
37#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
38 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
39
40/* Transmit status block */
41struct bcm_tsb {
42 u32 pcp_dei_vid;
43#define PCP_DEI_MASK 0xf
44#define VID_SHIFT 4
45#define VID_MASK 0xfff
46 u32 l4_ptr_dest_map;
47#define L4_CSUM_PTR_MASK 0x1ff
48#define L4_PTR_SHIFT 9
49#define L4_PTR_MASK 0x1ff
50#define L4_UDP (1 << 18)
51#define L4_LENGTH_VALID (1 << 19)
52#define DEST_MAP_SHIFT 20
53#define DEST_MAP_MASK 0x1ff
54};
55
56/* Receive status block uses the same
57 * definitions as the DMA descriptor
58 */
59struct bcm_rsb {
60 u32 rx_status_len;
61 u32 brcm_egress_tag;
62};
63
64/* Common Receive/Transmit status bits */
65#define DESC_L4_CSUM (1 << 7)
66#define DESC_SOP (1 << 8)
67#define DESC_EOP (1 << 9)
68
69/* Receive Status bits */
70#define RX_STATUS_UCAST 0
71#define RX_STATUS_BCAST 0x04
72#define RX_STATUS_MCAST 0x08
73#define RX_STATUS_L2_MCAST 0x0c
74#define RX_STATUS_ERR (1 << 4)
75#define RX_STATUS_OVFLOW (1 << 5)
76#define RX_STATUS_PARSE_FAIL (1 << 6)
77
78/* Transmit Status bits */
79#define TX_STATUS_VLAN_NO_ACT 0x00
80#define TX_STATUS_VLAN_PCP_TSB 0x01
81#define TX_STATUS_VLAN_QUEUE 0x02
82#define TX_STATUS_VLAN_VID_TSB 0x03
83#define TX_STATUS_OWR_CRC (1 << 2)
84#define TX_STATUS_APP_CRC (1 << 3)
85#define TX_STATUS_BRCM_TAG_NO_ACT 0
86#define TX_STATUS_BRCM_TAG_ZERO 0x10
87#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
88#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
89#define TX_STATUS_SKIP_BYTES (1 << 6)
90
91/* Specific register definitions */
92#define SYS_PORT_TOPCTRL_OFFSET 0
93#define REV_CNTL 0x00
94#define REV_MASK 0xffff
95
96#define RX_FLUSH_CNTL 0x04
97#define RX_FLUSH (1 << 0)
98
99#define TX_FLUSH_CNTL 0x08
100#define TX_FLUSH (1 << 0)
101
102#define MISC_CNTL 0x0c
103#define SYS_CLK_SEL (1 << 0)
104#define TDMA_EOP_SEL (1 << 1)
105
106/* Level-2 Interrupt controller offsets and defines */
107#define SYS_PORT_INTRL2_0_OFFSET 0x200
108#define SYS_PORT_INTRL2_1_OFFSET 0x240
109#define INTRL2_CPU_STATUS 0x00
110#define INTRL2_CPU_SET 0x04
111#define INTRL2_CPU_CLEAR 0x08
112#define INTRL2_CPU_MASK_STATUS 0x0c
113#define INTRL2_CPU_MASK_SET 0x10
114#define INTRL2_CPU_MASK_CLEAR 0x14
115
116/* Level-2 instance 0 interrupt bits */
117#define INTRL2_0_GISB_ERR (1 << 0)
118#define INTRL2_0_RBUF_OVFLOW (1 << 1)
119#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
120#define INTRL2_0_MPD (1 << 3)
121#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
122#define INTRL2_0_RDMA_MBDONE (1 << 5)
123#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
124#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
125#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
126#define INTRL2_0_TX_RING_FULL (1 << 9)
127#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
128#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
129
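Because the INTRL2 block exposes separate SET/CLEAR and MASK_SET/MASK_CLEAR registers, interrupt handling never needs a read-modify-write. A sketch of the ack-then-unmask idiom, mirroring what bcm_sysport_open() above does:

/* ack any pending RDMA buffer-done interrupt, then unmask it */
intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_MASK_CLEAR);
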
130/* RXCHK offset and defines */
131#define SYS_PORT_RXCHK_OFFSET 0x300
132
133#define RXCHK_CONTROL 0x00
134#define RXCHK_EN (1 << 0)
135#define RXCHK_SKIP_FCS (1 << 1)
136#define RXCHK_BAD_CSUM_DIS (1 << 2)
137#define RXCHK_BRCM_TAG_EN (1 << 3)
138#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
139#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
140#define RXCHK_PARSE_TNL (1 << 12)
141#define RXCHK_VIOL_EN (1 << 13)
142#define RXCHK_VIOL_DIS (1 << 14)
143#define RXCHK_INCOM_PKT (1 << 15)
144#define RXCHK_V6_DUPEXT_EN (1 << 16)
145#define RXCHK_V6_DUPEXT_DIS (1 << 17)
146#define RXCHK_ETHERTYPE_DIS (1 << 18)
147#define RXCHK_L2_HDR_DIS (1 << 19)
148#define RXCHK_L3_HDR_DIS (1 << 20)
149#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
150#define RXCHK_PARSE_AUTH (1 << 22)
151
152#define RXCHK_BRCM_TAG0 0x04
153#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
154#define RXCHK_BRCM_TAG0_MASK 0x24
155#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
156#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
157#define RXCHK_ETHERTYPE 0x48
158#define RXCHK_BAD_CSUM_CNTR 0x4C
159#define RXCHK_OTHER_DISC_CNTR 0x50
160
161/* TXCHCK offsets and defines */
162#define SYS_PORT_TXCHK_OFFSET 0x380
163#define TXCHK_PKT_RDY_THRESH 0x00
164
165/* Receive buffer offset and defines */
166#define SYS_PORT_RBUF_OFFSET 0x400
167
168#define RBUF_CONTROL 0x00
169#define RBUF_RSB_EN (1 << 0)
170#define RBUF_4B_ALGN (1 << 1)
171#define RBUF_BRCM_TAG_STRIP (1 << 2)
172#define RBUF_BAD_PKT_DISC (1 << 3)
173#define RBUF_RESUME_THRESH_SHIFT 4
174#define RBUF_RESUME_THRESH_MASK 0xff
175#define RBUF_OK_TO_SEND_SHIFT 12
176#define RBUF_OK_TO_SEND_MASK 0xff
177#define RBUF_CRC_REPLACE (1 << 20)
178#define RBUF_OK_TO_SEND_MODE (1 << 21)
179#define RBUF_RSB_SWAP (1 << 22)
180#define RBUF_ACPI_EN (1 << 23)
181
182#define RBUF_PKT_RDY_THRESH 0x04
183
184#define RBUF_STATUS 0x08
185#define RBUF_WOL_MODE (1 << 0)
186#define RBUF_MPD (1 << 1)
187#define RBUF_ACPI (1 << 2)
188
189#define RBUF_OVFL_DISC_CNTR 0x0c
190#define RBUF_ERR_PKT_CNTR 0x10
191
192/* Transmit buffer offset and defines */
193#define SYS_PORT_TBUF_OFFSET 0x600
194
195#define TBUF_CONTROL 0x00
196#define TBUF_BP_EN (1 << 0)
197#define TBUF_MAX_PKT_THRESH_SHIFT 1
198#define TBUF_MAX_PKT_THRESH_MASK 0x1f
199#define TBUF_FULL_THRESH_SHIFT 8
200#define TBUF_FULL_THRESH_MASK 0x1f
201
202/* UniMAC offset and defines */
203#define SYS_PORT_UMAC_OFFSET 0x800
204
205#define UMAC_CMD 0x008
206#define CMD_TX_EN (1 << 0)
207#define CMD_RX_EN (1 << 1)
208#define CMD_SPEED_SHIFT 2
209#define CMD_SPEED_10 0
210#define CMD_SPEED_100 1
211#define CMD_SPEED_1000 2
212#define CMD_SPEED_2500 3
213#define CMD_SPEED_MASK 3
214#define CMD_PROMISC (1 << 4)
215#define CMD_PAD_EN (1 << 5)
216#define CMD_CRC_FWD (1 << 6)
217#define CMD_PAUSE_FWD (1 << 7)
218#define CMD_RX_PAUSE_IGNORE (1 << 8)
219#define CMD_TX_ADDR_INS (1 << 9)
220#define CMD_HD_EN (1 << 10)
221#define CMD_SW_RESET (1 << 13)
222#define CMD_LCL_LOOP_EN (1 << 15)
223#define CMD_AUTO_CONFIG (1 << 22)
224#define CMD_CNTL_FRM_EN (1 << 23)
225#define CMD_NO_LEN_CHK (1 << 24)
226#define CMD_RMT_LOOP_EN (1 << 25)
227#define CMD_PRBL_EN (1 << 27)
228#define CMD_TX_PAUSE_IGNORE (1 << 28)
229#define CMD_TX_RX_EN (1 << 29)
230#define CMD_RUNT_FILTER_DIS (1 << 30)
231
232#define UMAC_MAC0 0x00c
233#define UMAC_MAC1 0x010
234#define UMAC_MAX_FRAME_LEN 0x014
235
236#define UMAC_TX_FLUSH 0x334
237
238#define UMAC_MIB_START 0x400
239
240/* There is a 0xC gap between the end of RX and beginning of TX stats, and
241 * another between the end of TX stats and the beginning of the RX RUNT counters
242 */
243#define UMAC_MIB_STAT_OFFSET 0xc
244
245#define UMAC_MIB_CTRL 0x580
246#define MIB_RX_CNT_RST (1 << 0)
247#define MIB_RUNT_CNT_RST (1 << 1)
248#define MIB_TX_CNT_RST (1 << 2)
249#define UMAC_MDF_CTRL 0x650
250#define UMAC_MDF_ADDR 0x654
251
252/* Receive DMA offset and defines */
253#define SYS_PORT_RDMA_OFFSET 0x2000
254
255#define RDMA_CONTROL 0x1000
256#define RDMA_EN (1 << 0)
257#define RDMA_RING_CFG (1 << 1)
258#define RDMA_DISC_EN (1 << 2)
259#define RDMA_BUF_DATA_OFFSET_SHIFT 4
260#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
261
262#define RDMA_STATUS 0x1004
263#define RDMA_DISABLED (1 << 0)
264#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
265#define RDMA_BP_STATUS (1 << 2)
266
267#define RDMA_SCB_BURST_SIZE 0x1008
268
269#define RDMA_RING_BUF_SIZE 0x100c
270#define RDMA_RING_SIZE_SHIFT 16
271
272#define RDMA_WRITE_PTR_HI 0x1010
273#define RDMA_WRITE_PTR_LO 0x1014
274#define RDMA_PROD_INDEX 0x1018
275#define RDMA_PROD_INDEX_MASK 0xffff
276
277#define RDMA_CONS_INDEX 0x101c
278#define RDMA_CONS_INDEX_MASK 0xffff
279
280#define RDMA_START_ADDR_HI 0x1020
281#define RDMA_START_ADDR_LO 0x1024
282#define RDMA_END_ADDR_HI 0x1028
283#define RDMA_END_ADDR_LO 0x102c
284
285#define RDMA_MBDONE_INTR 0x1030
286#define RDMA_INTR_THRESH_MASK 0xff
287#define RDMA_TIMEOUT_SHIFT 16
288#define RDMA_TIMEOUT_MASK 0xffff
289
290#define RDMA_XON_XOFF_THRESH 0x1034
291#define RDMA_XON_XOFF_THRESH_MASK 0xffff
292#define RDMA_XOFF_THRESH_SHIFT 16
293
294#define RDMA_READ_PTR_HI 0x1038
295#define RDMA_READ_PTR_LO 0x103c
296
297#define RDMA_OVERRIDE 0x1040
298#define RDMA_LE_MODE (1 << 0)
299#define RDMA_REG_MODE (1 << 1)
300
301#define RDMA_TEST 0x1044
302#define RDMA_TP_OUT_SEL (1 << 0)
303#define RDMA_MEM_SEL (1 << 1)
304
305#define RDMA_DEBUG 0x1048
306
307/* Transmit DMA offset and defines */
308#define TDMA_NUM_RINGS 32 /* rings = queues */
309#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bits words */
310
311#define SYS_PORT_TDMA_OFFSET 0x4000
312#define TDMA_WRITE_PORT_OFFSET 0x0000
313#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
314 (i) * TDMA_PORT_SIZE)
315#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
316 sizeof(u32) + (i) * TDMA_PORT_SIZE)
317
318#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
319 (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
320#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
321 (i) * TDMA_PORT_SIZE)
322#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
323 sizeof(u32) + (i) * TDMA_PORT_SIZE)
324
325#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
326 (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
327#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
328 (i) * sizeof(u32))
329
330#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
331 (TDMA_NUM_RINGS * sizeof(u32)))
332
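Working through the layout above with TDMA_NUM_RINGS = 32 and TDMA_PORT_SIZE = DESC_SIZE = 8 bytes: the write ports occupy 0x000-0x0ff, so TDMA_READ_PORT_OFFSET = 0x100; the read ports occupy another 0x100, so TDMA_READ_PORT_CMD_OFFSET = 0x200; the 32 command words (4 bytes each) end at 0x280, which is TDMA_DESC_RING_00_BASE. With TDMA_DESC_RING_SIZE = 28, ring i's register block starts at 0x280 + 28 * i.
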
333/* Register offsets and defines relative to a specific ring number */
334#define RING_HEAD_TAIL_PTR 0x00
335#define RING_HEAD_MASK 0x7ff
336#define RING_TAIL_SHIFT 11
337#define RING_TAIL_MASK 0x7ff
338#define RING_FLUSH (1 << 24)
339#define RING_EN (1 << 25)
340
341#define RING_COUNT 0x04
342#define RING_COUNT_MASK 0x7ff
343#define RING_BUFF_DONE_SHIFT 11
344#define RING_BUFF_DONE_MASK 0x7ff
345
346#define RING_MAX_HYST 0x08
347#define RING_MAX_THRESH_MASK 0x7ff
348#define RING_HYST_THRESH_SHIFT 11
349#define RING_HYST_THRESH_MASK 0x7ff
350
351#define RING_INTR_CONTROL 0x0c
352#define RING_INTR_THRESH_MASK 0x7ff
353#define RING_EMPTY_INTR_EN (1 << 15)
354#define RING_TIMEOUT_SHIFT 16
355#define RING_TIMEOUT_MASK 0xffff
356
357#define RING_PROD_CONS_INDEX 0x10
358#define RING_PROD_INDEX_MASK 0xffff
359#define RING_CONS_INDEX_SHIFT 16
360#define RING_CONS_INDEX_MASK 0xffff
361
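This register packs both indices of a TX ring into one 32-bit word; a sketch of how the consumer index is extracted during TX reclaim (the surrounding TX-reclaim code lives in bcmsysport.c above; local names are illustrative, and TDMA_DESC_RING_PROD_CONS_INDEX() is defined below):

u32 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
u32 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
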
362#define RING_MAPPING 0x14
363#define RING_QID_MASK 0x3
364#define RING_PORT_ID_SHIFT 3
365#define RING_PORT_ID_MASK 0x7
366#define RING_IGNORE_STATUS (1 << 6)
367#define RING_FAILOVER_EN (1 << 7)
368#define RING_CREDIT_SHIFT 8
369#define RING_CREDIT_MASK 0xffff
370
371#define RING_PCP_DEI_VID 0x18
372#define RING_VID_MASK 0x7ff
373#define RING_DEI (1 << 12)
374#define RING_PCP_SHIFT 13
375#define RING_PCP_MASK 0x7
376#define RING_PKT_SIZE_ADJ_SHIFT 16
377#define RING_PKT_SIZE_ADJ_MASK 0xf
378
379#define TDMA_DESC_RING_SIZE 28
380
381/* Definition for a given TX ring base address */
382#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
383 ((i) * TDMA_DESC_RING_SIZE))
384
385/* Ring indexed register addresses */
386#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
387 RING_HEAD_TAIL_PTR)
388#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
389 RING_COUNT)
390#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
391 RING_MAX_HYST)
392#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
393 RING_INTR_CONTROL)
394#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
395 (TDMA_DESC_RING_BASE(i) + \
396 RING_PROD_CONS_INDEX)
397#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
398 RING_MAPPING)
399#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
400 RING_PCP_DEI_VID)
401
402#define TDMA_CONTROL 0x600
403#define TDMA_EN (1 << 0)
404#define TSB_EN (1 << 1)
405#define TSB_SWAP (1 << 2)
406#define ACB_ALGO (1 << 3)
407#define BUF_DATA_OFFSET_SHIFT 4
408#define BUF_DATA_OFFSET_MASK 0x3ff
409#define VLAN_EN (1 << 14)
410#define SW_BRCM_TAG (1 << 15)
411#define WNC_KPT_SIZE_UPDATE (1 << 16)
412#define SYNC_PKT_SIZE (1 << 17)
413#define ACH_TXDONE_DELAY_SHIFT 18
414#define ACH_TXDONE_DELAY_MASK 0xff
415
416#define TDMA_STATUS 0x604
417#define TDMA_DISABLED (1 << 0)
418#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
419
420#define TDMA_SCB_BURST_SIZE 0x608
421#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
422#define TDMA_OVER_HYST_THRESH_STATUS 0x610
423#define TDMA_TPID 0x614
424
425#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
426#define TDMA_FREE_HEAD_MASK 0x7ff
427#define TDMA_FREE_TAIL_SHIFT 11
428#define TDMA_FREE_TAIL_MASK 0x7ff
429
430#define TDMA_FREE_LIST_COUNT 0x61c
431#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
432
433#define TDMA_TIER2_ARB_CTRL 0x620
434#define TDMA_ARB_MODE_RR 0
435#define TDMA_ARB_MODE_WEIGHT_RR 0x1
436#define TDMA_ARB_MODE_STRICT 0x2
437#define TDMA_ARB_MODE_DEFICIT_RR 0x3
438#define TDMA_CREDIT_SHIFT 4
439#define TDMA_CREDIT_MASK 0xffff
440
441#define TDMA_TIER1_ARB_0_CTRL 0x624
442#define TDMA_ARB_EN (1 << 0)
443
444#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
445#define TDMA_TIER1_ARB_1_CTRL 0x62c
446#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
447#define TDMA_TIER1_ARB_2_CTRL 0x634
448#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
449#define TDMA_TIER1_ARB_3_CTRL 0x63c
450#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
451
452#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
453#define TDMA_LE_MODE (1 << 0)
454#define TDMA_REG_MODE (1 << 1)
455
456#define TDMA_TEST 0x648
457#define TDMA_TP_OUT_SEL (1 << 0)
458#define TDMA_MEM_TM (1 << 1)
459
460#define TDMA_DEBUG 0x64c
461
462/* Transmit/Receive descriptor */
463struct dma_desc {
464 u32 addr_status_len;
465 u32 addr_lo;
466};
467
468/* Number of Receive hardware descriptor words */
469#define NUM_HW_RX_DESC_WORDS 1024
470/* Real number of usable descriptors */
471#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
472
473/* Internal linked-list RAM has up to 1536 entries */
474#define NUM_TX_DESC 1536
475
476#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
477
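For reference: struct dma_desc holds two u32 words, so WORDS_PER_DESC = 2, DESC_SIZE = 8 bytes, and NUM_RX_DESC = 1024 / 2 = 512 usable RX descriptors — the value used when sizing the RX control-block array in bcm_sysport_init_rx_ring().
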
478/* Rx/Tx common counter group */
479struct bcm_sysport_pkt_counters {
480 u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
481 u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
482 u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
483 u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
484 u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
485 u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
486 u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
487 u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet */
488 u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet */
489 u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet */
490};
491
492/* RSV, Receive Status Vector */
493struct bcm_sysport_rx_counters {
494 struct bcm_sysport_pkt_counters pkt_cnt;
495 u32 pkt; /* RO (0x428) Received pkt count */
496 u32 bytes; /* RO Received byte count */
497 u32 mca; /* RO # of Received multicast pkt */
498 u32 bca; /* RO # of Received broadcast pkt */
499 u32 fcs; /* RO # of Received FCS error */
500 u32 cf; /* RO # of Received control frame pkt */
501 u32 pf; /* RO # of Received pause frame pkt */
502 u32 uo; /* RO # of unknown op code pkt */
503 u32 aln; /* RO # of alignment error count */
504 u32 flr; /* RO # of frame length out of range count */
505 u32 cde; /* RO # of code error pkt */
506 u32 fcr; /* RO # of carrier sense error pkt */
507 u32 ovr; /* RO # of oversize pkt */
508 u32 jbr; /* RO # of jabber count */
509 u32 mtue; /* RO # of MTU error pkt */
510 u32 pok; /* RO # of Received good pkt */
511 u32 uc; /* RO # of unicast pkt */
512 u32 ppp; /* RO # of PPP pkt */
513 u32 rcrc; /* RO (0x470) # of CRC match pkt */
514};
515
516/* TSV, Transmit Status Vector */
517struct bcm_sysport_tx_counters {
518 struct bcm_sysport_pkt_counters pkt_cnt;
519 u32 pkts; /* RO (0x4a8) Transmitted pkt count */
520 u32 mca; /* RO # of xmited multicast pkt */
521 u32 bca; /* RO # of xmited broadcast pkt */
522 u32 pf; /* RO # of xmited pause frame count */
523 u32 cf; /* RO # of xmited control frame count */
524 u32 fcs; /* RO # of xmited FCS error count */
525 u32 ovr; /* RO # of xmited oversize pkt */
526 u32 drf; /* RO # of xmited deferral pkt */
527 u32 edf; /* RO # of xmited excessive deferral pkt */
528 u32 scl; /* RO # of xmited single collision pkt */
529 u32 mcl; /* RO # of xmited multiple collision pkt */
530 u32 lcl; /* RO # of xmited late collision pkt */
531 u32 ecl; /* RO # of xmited excessive collision pkt */
532 u32 frg; /* RO # of xmited fragments pkt */
533 u32 ncl; /* RO # of xmited total collision count */
534 u32 jbr; /* RO # of xmited jabber count */
535 u32 bytes; /* RO # of xmited byte count */
536 u32 pok; /* RO # of xmited good pkt */
537 u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
538};
539
540struct bcm_sysport_mib {
541 struct bcm_sysport_rx_counters rx;
542 struct bcm_sysport_tx_counters tx;
543 u32 rx_runt_cnt;
544 u32 rx_runt_fcs;
545 u32 rx_runt_fcs_align;
546 u32 rx_runt_bytes;
547 u32 rxchk_bad_csum;
548 u32 rxchk_other_pkt_disc;
549 u32 rbuf_ovflow_cnt;
550 u32 rbuf_err_cnt;
551};
552
553/* HW maintains a large list of counters */
554enum bcm_sysport_stat_type {
555 BCM_SYSPORT_STAT_NETDEV = -1,
556 BCM_SYSPORT_STAT_MIB_RX,
557 BCM_SYSPORT_STAT_MIB_TX,
558 BCM_SYSPORT_STAT_RUNT,
559 BCM_SYSPORT_STAT_RXCHK,
560 BCM_SYSPORT_STAT_RBUF,
561};
562
563/* Macros to help define ethtool statistics */
564#define STAT_NETDEV(m) { \
565 .stat_string = __stringify(m), \
566 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
567 .stat_offset = offsetof(struct net_device_stats, m), \
568 .type = BCM_SYSPORT_STAT_NETDEV, \
569}
570
571#define STAT_MIB(str, m, _type) { \
572 .stat_string = str, \
573 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
574 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
575 .type = _type, \
576}
577
578#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
579#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
580#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
581
582#define STAT_RXCHK(str, m, ofs) { \
583 .stat_string = str, \
584 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
585 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
586 .type = BCM_SYSPORT_STAT_RXCHK, \
587 .reg_offset = ofs, \
588}
589
590#define STAT_RBUF(str, m, ofs) { \
591 .stat_string = str, \
592 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
593 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
594 .type = BCM_SYSPORT_STAT_RBUF, \
595 .reg_offset = ofs, \
596}
597
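A sketch of how these macros combine into the driver's ethtool statistics table (entries abbreviated; the full bcm_sysport_gstrings_stats table lives in bcmsysport.c):

static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* generic netdev counter, name taken from the struct member */
	STAT_NETDEV(rx_packets),
	/* MIB counter read from the UniMAC RSV block */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	/* misc counter read at a fixed offset from the RXCHK block */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
};
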
598struct bcm_sysport_stats {
599 char stat_string[ETH_GSTRING_LEN];
600 int stat_sizeof;
601 int stat_offset;
602 enum bcm_sysport_stat_type type;
603 /* reg offset from UMAC base for misc counters */
604 u16 reg_offset;
605};
606
607/* Software housekeeping helper structure */
608struct bcm_sysport_cb {
609 struct sk_buff *skb; /* SKB for RX packets */
610 void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
611
612 DEFINE_DMA_UNMAP_ADDR(dma_addr);
613 DEFINE_DMA_UNMAP_LEN(dma_len);
614};
615
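The DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() members only occupy storage when the architecture needs DMA unmap state. A hedged usage fragment mirroring the RX refill/teardown in bcmsysport.c (local names illustrative):

struct bcm_sysport_cb *cb = &priv->rx_cbs[i];
dma_addr_t mapping;

mapping = dma_map_single(&priv->pdev->dev, skb->data,
			 RX_BUF_LENGTH, DMA_FROM_DEVICE);
if (!dma_mapping_error(&priv->pdev->dev, mapping)) {
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, RX_BUF_LENGTH);
}
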
616/* Software view of the TX ring */
617struct bcm_sysport_tx_ring {
618 spinlock_t lock; /* Ring lock for tx reclaim/xmit */
619 struct napi_struct napi; /* NAPI per tx queue */
620 dma_addr_t desc_dma; /* DMA cookie */
621 unsigned int index; /* Ring index */
622 unsigned int size; /* Ring current size */
623 unsigned int alloc_size; /* Ring one-time allocated size */
624 unsigned int desc_count; /* Number of descriptors */
625 unsigned int curr_desc; /* Current descriptor */
626 unsigned int c_index; /* Last consumer index */
627 unsigned int p_index; /* Current producer index */
628 struct bcm_sysport_cb *cbs; /* Transmit control blocks */
629 struct dma_desc *desc_cpu; /* CPU view of the descriptor */
630 struct bcm_sysport_priv *priv; /* private context backpointer */
631};
632
633/* Driver private structure */
634struct bcm_sysport_priv {
635 void __iomem *base;
636 u32 irq0_stat;
637 u32 irq0_mask;
638 u32 irq1_stat;
639 u32 irq1_mask;
640 struct napi_struct napi ____cacheline_aligned;
641 struct net_device *netdev;
642 struct platform_device *pdev;
643 int irq0;
644 int irq1;
645
646 /* Transmit rings */
647 struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
648
649 /* Receive queue */
650 void __iomem *rx_bds;
651 void __iomem *rx_bd_assign_ptr;
652 unsigned int rx_bd_assign_index;
653 struct bcm_sysport_cb *rx_cbs;
654 unsigned int num_rx_bds;
655 unsigned int rx_read_ptr;
656 unsigned int rx_c_index;
657
658 /* PHY device */
659 struct device_node *phy_dn;
660 struct phy_device *phydev;
661 phy_interface_t phy_interface;
662 int old_pause;
663 int old_link;
664 int old_duplex;
665
666 /* Misc fields */
667 unsigned int rx_csum_en:1;
668 unsigned int tsb_en:1;
669 unsigned int crc_fwd:1;
670 u16 rev;
671
672 /* MIB related fields */
673 struct bcm_sysport_mib mib;
674
675 /* Ethtool */
676 u32 msg_enable;
677};
678#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0297a79a38e1..05c6af6c418f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
1436 return -ENOMEM; 1436 return -ENOMEM;
1437 net_dev->netdev_ops = &bgmac_netdev_ops; 1437 net_dev->netdev_ops = &bgmac_netdev_ops;
1438 net_dev->irq = core->irq; 1438 net_dev->irq = core->irq;
1439 SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops); 1439 net_dev->ethtool_ops = &bgmac_ethtool_ops;
1440 bgmac = netdev_priv(net_dev); 1440 bgmac = netdev_priv(net_dev);
1441 bgmac->net_dev = net_dev; 1441 bgmac->net_dev = net_dev;
1442 bgmac->core = core; 1442 bgmac->core = core;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0ab83708b6a1..67d2b0047371 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6916,8 +6916,8 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6916 } 6916 }
6917 } 6917 }
6918 else { 6918 else {
6919 ethtool_cmd_speed_set(cmd, -1); 6919 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6920 cmd->duplex = -1; 6920 cmd->duplex = DUPLEX_UNKNOWN;
6921 } 6921 }
6922 spin_unlock_bh(&bp->phy_lock); 6922 spin_unlock_bh(&bp->phy_lock);
6923 6923
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4d8f8aba0ea5..4cab09d3f807 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 */ 12 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dd57c7c5a3da..47c5814114e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
906 bd_prod = RX_BD(bd_prod); 906 bd_prod = RX_BD(bd_prod);
907 bd_cons = RX_BD(bd_cons); 907 bd_cons = RX_BD(bd_cons);
908 908
909 /* A rmb() is required to ensure that the CQE is not read
910 * before it is written by the adapter DMA. PCI ordering
911 * rules will make sure the other fields are written before
912 * the marker at the end of struct eth_fast_path_rx_cqe
 913 * but without rmb() a weakly ordered processor can process
 914 * stale data. Without the barrier the TPA state-machine might
 915 * enter an inconsistent state and the kernel stack might be
 916 * provided with an incorrect packet description - these lead
 917 * to various kernel crashes.
918 */
919 rmb();
920
909 cqe_fp_flags = cqe_fp->type_error_flags; 921 cqe_fp_flags = cqe_fp->type_error_flags;
910 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 922 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
911 923
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3448cc033ca5..571427c7226b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 97ea5421dd96..51a952c51cb1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -12,7 +12,7 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Dmitry Kravkov 16 * Written by: Dmitry Kravkov
17 * 17 *
18 */ 18 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 804b8f64463e..c6939ecb02c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -12,7 +12,7 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Dmitry Kravkov 16 * Written by: Dmitry Kravkov
17 * 17 *
18 */ 18 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index b6de05e3149b..bd0600cf7266 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
@@ -3316,7 +3316,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
3316 return T_ETH_INDIRECTION_TABLE_SIZE; 3316 return T_ETH_INDIRECTION_TABLE_SIZE;
3317} 3317}
3318 3318
3319static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) 3319static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
3320{ 3320{
3321 struct bnx2x *bp = netdev_priv(dev); 3321 struct bnx2x *bp = netdev_priv(dev);
3322 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; 3322 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -3340,14 +3340,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
3340 return 0; 3340 return 0;
3341} 3341}
3342 3342
3343static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) 3343static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
3344 const u8 *key)
3344{ 3345{
3345 struct bnx2x *bp = netdev_priv(dev); 3346 struct bnx2x *bp = netdev_priv(dev);
3346 size_t i; 3347 size_t i;
3347 3348
3348 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 3349 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3349 /* 3350 /*
3350 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() 3351 * The same as in bnx2x_get_rxfh: we can't use a memcpy()
3351 * as an internal storage of an indirection table is a u8 array 3352 * as an internal storage of an indirection table is a u8 array
3352 * while indir->ring_index points to an array of u32. 3353 * while indir->ring_index points to an array of u32.
3353 * 3354 *
@@ -3471,8 +3472,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
3471 .get_rxnfc = bnx2x_get_rxnfc, 3472 .get_rxnfc = bnx2x_get_rxnfc,
3472 .set_rxnfc = bnx2x_set_rxnfc, 3473 .set_rxnfc = bnx2x_set_rxnfc,
3473 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, 3474 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
3474 .get_rxfh_indir = bnx2x_get_rxfh_indir, 3475 .get_rxfh = bnx2x_get_rxfh,
3475 .set_rxfh_indir = bnx2x_set_rxfh_indir, 3476 .set_rxfh = bnx2x_set_rxfh,
3476 .get_channels = bnx2x_get_channels, 3477 .get_channels = bnx2x_get_channels,
3477 .set_channels = bnx2x_set_channels, 3478 .set_channels = bnx2x_set_channels,
3478 .get_module_info = bnx2x_get_module_info, 3479 .get_module_info = bnx2x_get_module_info,
@@ -3498,16 +3499,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
3498 .get_rxnfc = bnx2x_get_rxnfc, 3499 .get_rxnfc = bnx2x_get_rxnfc,
3499 .set_rxnfc = bnx2x_set_rxnfc, 3500 .set_rxnfc = bnx2x_set_rxnfc,
3500 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, 3501 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
3501 .get_rxfh_indir = bnx2x_get_rxfh_indir, 3502 .get_rxfh = bnx2x_get_rxfh,
3502 .set_rxfh_indir = bnx2x_set_rxfh_indir, 3503 .set_rxfh = bnx2x_set_rxfh,
3503 .get_channels = bnx2x_get_channels, 3504 .get_channels = bnx2x_get_channels,
3504 .set_channels = bnx2x_set_channels, 3505 .set_channels = bnx2x_set_channels,
3505}; 3506};
3506 3507
3507void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) 3508void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
3508{ 3509{
3509 if (IS_PF(bp)) 3510 netdev->ethtool_ops = (IS_PF(bp)) ?
3510 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); 3511 &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
3511 else /* vf */
3512 SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
3513} 3512}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f572ae164fce..8aafd9b5d6a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -6,8 +6,8 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Vladislav Zolotarov <vladz@broadcom.com> 10 * Written by: Vladislav Zolotarov
11 * Based on the original idea of John Wright <john.wright@hp.com>. 11 * Based on the original idea of John Wright <john.wright@hp.com>.
12 */ 12 */
13 13
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c2dfea7968f4..bd90e50bd8e6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -7,9 +7,9 @@
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation. 8 * the Free Software Foundation.
9 * 9 *
10 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 10 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
11 * Written by: Eliezer Tamir 11 * Written by: Eliezer Tamir
12 * Modified by: Vladislav Zolotarov <vladz@broadcom.com> 12 * Modified by: Vladislav Zolotarov
13 */ 13 */
14 14
15#ifndef BNX2X_INIT_H 15#ifndef BNX2X_INIT_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 8ab0dd900960..5669ed2e87d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -8,8 +8,8 @@
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 * 10 *
11 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Vladislav Zolotarov <vladz@broadcom.com> 12 * Written by: Vladislav Zolotarov
13 */ 13 */
14 14
15#ifndef BNX2X_INIT_OPS_H 15#ifndef BNX2X_INIT_OPS_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9b6b3d7304b6..53fb4fa61b40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2218,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
2218 */ 2218 */
2219 u32 val; 2219 u32 val;
2220 struct bnx2x *bp = params->bp; 2220 struct bnx2x *bp = params->bp;
2221 int bnx2x_status = 0;
2222 u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); 2221 u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
2223 2222
2224 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) 2223 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2232,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
2232 bnx2x_update_pfc_nig(params, vars, pfc_params); 2231 bnx2x_update_pfc_nig(params, vars, pfc_params);
2233 2232
2234 if (!vars->link_up) 2233 if (!vars->link_up)
2235 return bnx2x_status; 2234 return 0;
2236 2235
2237 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); 2236 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
2238 2237
@@ -2246,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
2246 == 0) { 2245 == 0) {
2247 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); 2246 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
2248 bnx2x_emac_enable(params, vars, 0); 2247 bnx2x_emac_enable(params, vars, 0);
2249 return bnx2x_status; 2248 return 0;
2250 } 2249 }
2251 if (CHIP_IS_E2(bp)) 2250 if (CHIP_IS_E2(bp))
2252 bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); 2251 bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2260,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
2260 val = 1; 2259 val = 1;
2261 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); 2260 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
2262 } 2261 }
2263 return bnx2x_status; 2262 return 0;
2264} 2263}
2265 2264
2266static int bnx2x_bmac1_enable(struct link_params *params, 2265static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3703,7 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
3703static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3702static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3704 struct link_params *params, 3703 struct link_params *params,
3705 struct link_vars *vars) { 3704 struct link_vars *vars) {
3706 u16 lane, i, cl72_ctrl, an_adv = 0; 3705 u16 lane, i, cl72_ctrl, an_adv = 0, val;
3706 u32 wc_lane_config;
3707 struct bnx2x *bp = params->bp; 3707 struct bnx2x *bp = params->bp;
3708 static struct bnx2x_reg_set reg_set[] = { 3708 static struct bnx2x_reg_set reg_set[] = {
3709 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3709 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3822,15 +3822,27 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3822 /* Enable Auto-Detect to support 1G over CL37 as well */ 3822 /* Enable Auto-Detect to support 1G over CL37 as well */
3823 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3823 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3824 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); 3824 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
3825 3825 wc_lane_config = REG_RD(bp, params->shmem_base +
3826 offsetof(struct shmem_region, dev_info.
3827 shared_hw_config.wc_lane_config));
3828 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3829 MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
3826 /* Force cl48 sync_status LOW to avoid getting stuck in CL73 3830 /* Force cl48 sync_status LOW to avoid getting stuck in CL73
3827 * parallel-detect loop when CL73 and CL37 are enabled. 3831 * parallel-detect loop when CL73 and CL37 are enabled.
3828 */ 3832 */
3829 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3833 val |= 1 << 11;
3830 MDIO_AER_BLOCK_AER_REG, 0); 3834
3835 /* Restore Polarity settings in case it was run over by
3836 * previous link owner
3837 */
3838 if (wc_lane_config &
3839 (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
3840 val |= 3 << 2;
3841 else
3842 val &= ~(3 << 2);
3831 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3843 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3832 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); 3844 MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
3833 bnx2x_set_aer_mmd(params, phy); 3845 val);
3834 3846
3835 bnx2x_disable_kr2(params, vars, phy); 3847 bnx2x_disable_kr2(params, vars, phy);
3836 } 3848 }
@@ -6473,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
6473static int bnx2x_link_initialize(struct link_params *params, 6485static int bnx2x_link_initialize(struct link_params *params,
6474 struct link_vars *vars) 6486 struct link_vars *vars)
6475{ 6487{
6476 int rc = 0;
6477 u8 phy_index, non_ext_phy; 6488 u8 phy_index, non_ext_phy;
6478 struct bnx2x *bp = params->bp; 6489 struct bnx2x *bp = params->bp;
6479 /* In case of external phy existence, the line speed would be the 6490 /* In case of external phy existence, the line speed would be the
@@ -6546,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
6546 NIG_STATUS_XGXS0_LINK_STATUS | 6557 NIG_STATUS_XGXS0_LINK_STATUS |
6547 NIG_STATUS_SERDES0_LINK_STATUS | 6558 NIG_STATUS_SERDES0_LINK_STATUS |
6548 NIG_MASK_MI_INT)); 6559 NIG_MASK_MI_INT));
6549 return rc; 6560 return 0;
6550} 6561}
6551 6562
6552static void bnx2x_int_link_reset(struct bnx2x_phy *phy, 6563static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -12461,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
12461 u32 dont_clear_stat, lfa_sts; 12472 u32 dont_clear_stat, lfa_sts;
12462 struct bnx2x *bp = params->bp; 12473 struct bnx2x *bp = params->bp;
12463 12474
12475 bnx2x_set_mdio_emac_per_phy(bp, params);
12464 /* Sync the link parameters */ 12476 /* Sync the link parameters */
12465 bnx2x_link_status_update(params, vars); 12477 bnx2x_link_status_update(params, vars);
12466 12478
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3a8e51ed5bec..2887034523e0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
@@ -10053,6 +10053,24 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10053#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10053#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) 10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) 10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
10056
10057static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10058{
10059 /* UNDI marks its presence in DORQ -
10060 * it initializes CID offset for normal bell to 0x7
10061 */
10062 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10063 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10064 return false;
10065
10066 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10067 BNX2X_DEV_INFO("UNDI previously loaded\n");
10068 return true;
10069 }
10070
10071 return false;
10072}
10073
10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10074static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
10057{ 10075{
10058 u8 major, minor, version; 10076 u8 major, minor, version;
@@ -10302,6 +10320,10 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10302 10320
10303 BNX2X_DEV_INFO("Path is unmarked\n"); 10321 BNX2X_DEV_INFO("Path is unmarked\n");
10304 10322
10323 /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10324 if (bnx2x_prev_is_after_undi(bp))
10325 goto out;
10326
10305 /* If function has FLR capabilities, and existing FW version matches 10327 /* If function has FLR capabilities, and existing FW version matches
10306 * the one required, then FLR will be sufficient to clean any residue 10328 * the one required, then FLR will be sufficient to clean any residue
10307 * left by previous driver 10329 * left by previous driver
@@ -10322,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10322 10344
10323 BNX2X_DEV_INFO("Could not FLR\n"); 10345 BNX2X_DEV_INFO("Could not FLR\n");
10324 10346
10347out:
10325 /* Close the MCP request, return failure*/ 10348 /* Close the MCP request, return failure*/
10326 rc = bnx2x_prev_mcp_done(bp); 10349 rc = bnx2x_prev_mcp_done(bp);
10327 if (!rc) 10350 if (!rc)
@@ -10360,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10360 /* close LLH filters towards the BRB */ 10383 /* close LLH filters towards the BRB */
10361 bnx2x_set_rx_filter(&bp->link_params, 0); 10384 bnx2x_set_rx_filter(&bp->link_params, 0);
10362 10385
10363 /* Check if the UNDI driver was previously loaded 10386 /* Check if the UNDI driver was previously loaded */
10364 * UNDI driver initializes CID offset for normal bell to 0x7 10387 if (bnx2x_prev_is_after_undi(bp)) {
10365 */ 10388 prev_undi = true;
10366 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 10389 /* clear the UNDI indication */
10367 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 10390 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10368 if (tmp_reg == 0x7) { 10391 /* clear possible idle check errors */
10369 BNX2X_DEV_INFO("UNDI previously loaded\n"); 10392 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10370 prev_undi = true;
10371 /* clear the UNDI indication */
10372 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10373 /* clear possible idle check errors */
10374 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10375 }
10376 } 10393 }
10377 if (!CHIP_IS_E1x(bp)) 10394 if (!CHIP_IS_E1x(bp))
10378 /* block FW from writing to host */ 10395 /* block FW from writing to host */
@@ -13283,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13283 netdev_reset_tc(bp->dev); 13300 netdev_reset_tc(bp->dev);
13284 13301
13285 del_timer_sync(&bp->timer); 13302 del_timer_sync(&bp->timer);
13286 cancel_delayed_work(&bp->sp_task); 13303 cancel_delayed_work_sync(&bp->sp_task);
13287 cancel_delayed_work(&bp->period_task); 13304 cancel_delayed_work_sync(&bp->period_task);
13288 13305
13289 spin_lock_bh(&bp->stats_lock); 13306 spin_lock_bh(&bp->stats_lock);
13290 bp->stats_state = STATS_STATE_DISABLED; 13307 bp->stats_state = STATS_STATE_DISABLED;
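
Two of the bnx2x_main.c hunks above are worth a gloss. The first folds the duplicated DORQ register check into the new bnx2x_prev_is_after_undi() helper; the second switches the EEH unload path from cancel_delayed_work() to cancel_delayed_work_sync(). The distinction matters: the plain variant only deactivates a pending work item and can return while the handler is still running on another CPU. A minimal sketch of the idea, with hypothetical demo_* names, assuming ordinary kernel module context:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_ctx {
	struct delayed_work task;
	void *resource;			/* touched by the work handler */
};

static void demo_teardown(struct demo_ctx *ctx)
{
	/*
	 * cancel_delayed_work(&ctx->task) would only deactivate a pending
	 * timer; a handler already running on another CPU could still
	 * dereference ctx->resource after the kfree() below.
	 */
	cancel_delayed_work_sync(&ctx->task);	/* waits for the handler */
	kfree(ctx->resource);			/* now safe to free */
}
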
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index d725317c4277..b1936044767a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -12,7 +12,7 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Vladislav Zolotarov 16 * Written by: Vladislav Zolotarov
17 * 17 *
18 */ 18 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 80f6c790ed88..718ecd294661 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -12,7 +12,7 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Vladislav Zolotarov 16 * Written by: Vladislav Zolotarov
17 * 17 *
18 */ 18 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index faf01488d26e..eda8583f6fc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -12,9 +12,9 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 16 * Written by: Shmulik Ravid
17 * Ariel Elior <ariele@broadcom.com> 17 * Ariel Elior <ariel.elior@qlogic.com>
18 * 18 *
19 */ 19 */
20#include "bnx2x.h" 20#include "bnx2x.h"
@@ -1071,8 +1071,10 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1071 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1071 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1072 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1072 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1073 1073
1074 /* set the VF doorbell threshold */ 1074 /* set the VF doorbell threshold. This threshold represents the amount
1075 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 1075 * of doorbells allowed in the main DORQ fifo for a specific VF.
1076 */
1077 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
1076} 1078}
1077 1079
1078void bnx2x_iov_init_dmae(struct bnx2x *bp) 1080void bnx2x_iov_init_dmae(struct bnx2x *bp)
@@ -2576,7 +2578,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2576 2578
2577 ivi->vf = vfidx; 2579 ivi->vf = vfidx;
2578 ivi->qos = 0; 2580 ivi->qos = 0;
2579 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 2581 ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2582 ivi->min_tx_rate = 0;
2580 ivi->spoofchk = 1; /*always enabled */ 2583 ivi->spoofchk = 1; /*always enabled */
2581 if (vf->state == VF_ENABLED) { 2584 if (vf->state == VF_ENABLED) {
2582 /* mac and vlan are in vlan_mac objects */ 2585 /* mac and vlan are in vlan_mac objects */
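
The get_vf_config hunk above tracks an API change from this merge window: struct ifla_vf_info's single tx_rate field became a max_tx_rate/min_tx_rate pair, so .ndo_get_vf_config implementations now report both. A hedged sketch of that contract, with placeholder values and a hypothetical demo_ prefix:

#include <linux/netdevice.h>
#include <linux/if_link.h>

static int demo_get_vf_config(struct net_device *dev, int vf,
			      struct ifla_vf_info *ivi)
{
	ivi->vf = vf;
	ivi->max_tx_rate = 10000;	/* Mb/s ceiling, 0 = unlimited */
	ivi->min_tx_rate = 0;		/* guaranteed Mb/s, 0 = none */
	ivi->qos = 0;
	ivi->spoofchk = 1;
	return 0;
}
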
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 6929adba52f9..96c575e147a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -12,9 +12,9 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 16 * Written by: Shmulik Ravid
17 * Ariel Elior <ariele@broadcom.com> 17 * Ariel Elior <ariel.elior@qlogic.com>
18 */ 18 */
19#ifndef BNX2X_SRIOV_H 19#ifndef BNX2X_SRIOV_H
20#define BNX2X_SRIOV_H 20#define BNX2X_SRIOV_H
@@ -571,7 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
571 return NULL; 571 return NULL;
572} 572}
573 573
574static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; } 574static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
575static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 575static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
576static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 576static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
577static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 577static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 3b75070411aa..ca47665f94bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index f35845006cdd..2beceaefdeea 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 784c7155b98a..d712d0ddd719 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -12,9 +12,9 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 16 * Written by: Shmulik Ravid
17 * Ariel Elior <ariele@broadcom.com> 17 * Ariel Elior <ariel.elior@qlogic.com>
18 */ 18 */
19 19
20#include "bnx2x.h" 20#include "bnx2x.h"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index c922b81170e5..e21e706762c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -12,8 +12,8 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Ariel Elior <ariele@broadcom.com> 16 * Written by: Ariel Elior <ariel.elior@qlogic.com>
17 */ 17 */
18#ifndef VF_PF_IF_H 18#ifndef VF_PF_IF_H
19#define VF_PF_IF_H 19#define VF_PF_IF_H
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4dd48d2fa804..8244e2b14bb4 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
608 pr_err("%s: Bad type %d\n", __func__, ulp_type); 608 pr_err("%s: Bad type %d\n", __func__, ulp_type);
609 return -EINVAL; 609 return -EINVAL;
610 } 610 }
611
612 if (ulp_type == CNIC_ULP_ISCSI)
613 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
614
611 mutex_lock(&cnic_lock); 615 mutex_lock(&cnic_lock);
612 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 616 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
613 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 617 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
620 } 624 }
621 mutex_unlock(&cnic_lock); 625 mutex_unlock(&cnic_lock);
622 626
623 if (ulp_type == CNIC_ULP_ISCSI) 627 if (ulp_type == CNIC_ULP_FCOE)
624 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
625 else if (ulp_type == CNIC_ULP_FCOE)
626 dev->fcoe_cap = NULL; 628 dev->fcoe_cap = NULL;
627 629
628 synchronize_rcu(); 630 synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1039 struct cnic_local *cp = dev->cnic_priv; 1041 struct cnic_local *cp = dev->cnic_priv;
1040 struct cnic_uio_dev *udev; 1042 struct cnic_uio_dev *udev;
1041 1043
1042 read_lock(&cnic_dev_lock);
1043 list_for_each_entry(udev, &cnic_udev_list, list) { 1044 list_for_each_entry(udev, &cnic_udev_list, list) {
1044 if (udev->pdev == dev->pcidev) { 1045 if (udev->pdev == dev->pcidev) {
1045 udev->dev = dev; 1046 udev->dev = dev;
1046 if (__cnic_alloc_uio_rings(udev, pages)) { 1047 if (__cnic_alloc_uio_rings(udev, pages)) {
1047 udev->dev = NULL; 1048 udev->dev = NULL;
1048 read_unlock(&cnic_dev_lock);
1049 return -ENOMEM; 1049 return -ENOMEM;
1050 } 1050 }
1051 cp->udev = udev; 1051 cp->udev = udev;
1052 read_unlock(&cnic_dev_lock);
1053 return 0; 1052 return 0;
1054 } 1053 }
1055 } 1054 }
1056 read_unlock(&cnic_dev_lock);
1057 1055
1058 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); 1056 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1059 if (!udev) 1057 if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1067 if (__cnic_alloc_uio_rings(udev, pages)) 1065 if (__cnic_alloc_uio_rings(udev, pages))
1068 goto err_udev; 1066 goto err_udev;
1069 1067
1070 write_lock(&cnic_dev_lock);
1071 list_add(&udev->list, &cnic_udev_list); 1068 list_add(&udev->list, &cnic_udev_list);
1072 write_unlock(&cnic_dev_lock);
1073 1069
1074 pci_dev_get(udev->pdev); 1070 pci_dev_get(udev->pdev);
1075 1071
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5624{ 5620{
5625 int if_type; 5621 int if_type;
5626 5622
5627 rcu_read_lock();
5628 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5623 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5629 struct cnic_ulp_ops *ulp_ops; 5624 struct cnic_ulp_ops *ulp_ops;
5630 void *ctx; 5625 void *ctx;
5631 5626
5632 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5627 mutex_lock(&cnic_lock);
5633 if (!ulp_ops || !ulp_ops->indicate_netevent) 5628 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5629 lockdep_is_held(&cnic_lock));
5630 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5631 mutex_unlock(&cnic_lock);
5634 continue; 5632 continue;
5633 }
5635 5634
5636 ctx = cp->ulp_handle[if_type]; 5635 ctx = cp->ulp_handle[if_type];
5637 5636
5637 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5638 mutex_unlock(&cnic_lock);
5639
5638 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5640 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5641
5642 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5639 } 5643 }
5640 rcu_read_unlock();
5641} 5644}
5642 5645
5643/* netdev event handler */ 5646/* netdev event handler */
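
The cnic_rcv_netevent() rewrite above is a locking pattern worth spelling out: indicate_netevent() may sleep, so it cannot be invoked under rcu_read_lock(). Instead the ops pointer is sampled under a mutex with rcu_dereference_protected(), a call-pending bit is set, and the mutex is dropped before calling out; the bit lets the unregister path wait for in-flight callbacks. A self-contained sketch with hypothetical demo_* types (not the cnic structures):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>

#define DEMO_F_CALL_PENDING	0

struct demo_ops {
	void (*indicate_netevent)(void *ctx, unsigned long event);
};

struct demo_ulp {
	struct demo_ops __rcu *ops;
	unsigned long flags;
	void *ctx;
};

static DEFINE_MUTEX(demo_lock);

static void demo_notify(struct demo_ulp *ulp, unsigned long event)
{
	struct demo_ops *ops;

	mutex_lock(&demo_lock);
	ops = rcu_dereference_protected(ulp->ops,
					lockdep_is_held(&demo_lock));
	if (!ops) {
		mutex_unlock(&demo_lock);
		return;
	}
	set_bit(DEMO_F_CALL_PENDING, &ulp->flags);
	mutex_unlock(&demo_lock);

	ops->indicate_netevent(ulp->ctx, event);	/* may sleep here */

	clear_bit(DEMO_F_CALL_PENDING, &ulp->flags);
}
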
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0966bd04375f..5ba1cfbd60da 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
2481 dev_set_drvdata(&pdev->dev, dev); 2481 dev_set_drvdata(&pdev->dev, dev);
2482 ether_addr_copy(dev->dev_addr, macaddr); 2482 ether_addr_copy(dev->dev_addr, macaddr);
2483 dev->watchdog_timeo = 2 * HZ; 2483 dev->watchdog_timeo = 2 * HZ;
2484 SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops); 2484 dev->ethtool_ops = &bcmgenet_ethtool_ops;
2485 dev->netdev_ops = &bcmgenet_netdev_ops; 2485 dev->netdev_ops = &bcmgenet_netdev_ops;
2486 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); 2486 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
2487 2487
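
The one-line bcmgenet change above recurs throughout this section (bnad, xgmac and the Chelsio drivers below): the SET_ETHTOOL_OPS() macro is dropped in favor of assigning the ops table directly. The macro expanded to exactly this assignment, so the replacement is mechanical; the ops name below is illustrative:

	/* old:  SET_ETHTOOL_OPS(netdev, &demo_ethtool_ops); */
	netdev->ethtool_ops = &demo_ethtool_ops;
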
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4608673beaff..add8d8596084 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
298static int bcmgenet_mii_probe(struct net_device *dev) 298static int bcmgenet_mii_probe(struct net_device *dev)
299{ 299{
300 struct bcmgenet_priv *priv = netdev_priv(dev); 300 struct bcmgenet_priv *priv = netdev_priv(dev);
301 struct device_node *dn = priv->pdev->dev.of_node;
301 struct phy_device *phydev; 302 struct phy_device *phydev;
302 unsigned int phy_flags; 303 unsigned int phy_flags;
303 int ret; 304 int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
307 return 0; 308 return 0;
308 } 309 }
309 310
310 if (priv->phy_dn) 311 /* In the case of a fixed PHY, the DT node associated
311 phydev = of_phy_connect(dev, priv->phy_dn, 312 * to the PHY is the Ethernet MAC DT node.
312 bcmgenet_mii_setup, 0, 313 */
313 priv->phy_interface); 314 if (of_phy_is_fixed_link(dn)) {
314 else 315 ret = of_phy_register_fixed_link(dn);
315 phydev = of_phy_connect_fixed_link(dev, 316 if (ret)
316 bcmgenet_mii_setup, 317 return ret;
317 priv->phy_interface); 318
319 priv->phy_dn = dn;
320 }
318 321
322 phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
323 priv->phy_interface);
319 if (!phydev) { 324 if (!phydev) {
320 pr_err("could not attach to PHY\n"); 325 pr_err("could not attach to PHY\n");
321 return -ENODEV; 326 return -ENODEV;
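
The bcmmii.c hunk above moves from the driver-private of_phy_connect_fixed_link() to the generic fixed-link helpers: when the MAC's own DT node describes a fixed link, register it first, then connect through the normal of_phy_connect() path. A sketch of the pattern under those assumptions (demo_ names and the RGMII interface are placeholders):

#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/errno.h>

static int demo_phy_probe(struct net_device *dev, struct device_node *dn,
			  void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev;

	if (of_phy_is_fixed_link(dn)) {
		int ret = of_phy_register_fixed_link(dn);

		if (ret)
			return ret;
		/* the MAC node itself now represents the fixed PHY */
	}

	phydev = of_phy_connect(dev, dn, adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	return phydev ? 0 : -ENODEV;
}
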
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e5d95c5ce1ad..df2792d8383d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation. 7 * Copyright (C) 2005-2014 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
94 94
95#define DRV_MODULE_NAME "tg3" 95#define DRV_MODULE_NAME "tg3"
96#define TG3_MAJ_NUM 3 96#define TG3_MAJ_NUM 3
97#define TG3_MIN_NUM 136 97#define TG3_MIN_NUM 137
98#define DRV_MODULE_VERSION \ 98#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE "Jan 03, 2014" 100#define DRV_MODULE_RELDATE "May 11, 2014"
101 101
102#define RESET_KIND_SHUTDOWN 0 102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1 103#define RESET_KIND_INIT 1
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3224 return 0; 3224 return 0;
3225} 3225}
3226 3226
3227#define NVRAM_CMD_TIMEOUT 10000 3227#define NVRAM_CMD_TIMEOUT 100
3228 3228
3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230{ 3230{
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
7871 return NETDEV_TX_OK; 7871 return NETDEV_TX_OK;
7872} 7872}
7873 7873
7874/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 7874/* hard_start_xmit for all devices */
7875 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7876 */
7877static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7875static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7878{ 7876{
7879 struct tg3 *tp = netdev_priv(dev); 7877 struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7884 struct tg3_napi *tnapi; 7882 struct tg3_napi *tnapi;
7885 struct netdev_queue *txq; 7883 struct netdev_queue *txq;
7886 unsigned int last; 7884 unsigned int last;
7885 struct iphdr *iph = NULL;
7886 struct tcphdr *tcph = NULL;
7887 __sum16 tcp_csum = 0, ip_csum = 0;
7888 __be16 ip_tot_len = 0;
7887 7889
7888 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7890 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7889 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7891 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7915 7917
7916 mss = skb_shinfo(skb)->gso_size; 7918 mss = skb_shinfo(skb)->gso_size;
7917 if (mss) { 7919 if (mss) {
7918 struct iphdr *iph;
7919 u32 tcp_opt_len, hdr_len; 7920 u32 tcp_opt_len, hdr_len;
7920 7921
7921 if (skb_cow_head(skb, 0)) 7922 if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7927 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7928 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7928 7929
7929 if (!skb_is_gso_v6(skb)) { 7930 if (!skb_is_gso_v6(skb)) {
7931 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7932 tg3_flag(tp, TSO_BUG))
7933 return tg3_tso_bug(tp, skb);
7934
7935 ip_csum = iph->check;
7936 ip_tot_len = iph->tot_len;
7930 iph->check = 0; 7937 iph->check = 0;
7931 iph->tot_len = htons(mss + hdr_len); 7938 iph->tot_len = htons(mss + hdr_len);
7932 } 7939 }
7933 7940
7934 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7935 tg3_flag(tp, TSO_BUG))
7936 return tg3_tso_bug(tp, skb);
7937
7938 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7941 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7939 TXD_FLAG_CPU_POST_DMA); 7942 TXD_FLAG_CPU_POST_DMA);
7940 7943
7944 tcph = tcp_hdr(skb);
7945 tcp_csum = tcph->check;
7946
7941 if (tg3_flag(tp, HW_TSO_1) || 7947 if (tg3_flag(tp, HW_TSO_1) ||
7942 tg3_flag(tp, HW_TSO_2) || 7948 tg3_flag(tp, HW_TSO_2) ||
7943 tg3_flag(tp, HW_TSO_3)) { 7949 tg3_flag(tp, HW_TSO_3)) {
7944 tcp_hdr(skb)->check = 0; 7950 tcph->check = 0;
7945 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7951 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7946 } else 7952 } else {
7947 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 7953 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7948 iph->daddr, 0, 7954 0, IPPROTO_TCP, 0);
7949 IPPROTO_TCP, 7955 }
7950 0);
7951 7956
7952 if (tg3_flag(tp, HW_TSO_3)) { 7957 if (tg3_flag(tp, HW_TSO_3)) {
7953 mss |= (hdr_len & 0xc) << 12; 7958 mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8047 if (would_hit_hwbug) { 8052 if (would_hit_hwbug) {
8048 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8053 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8049 8054
8055 if (mss) {
8056 /* If it's a TSO packet, do GSO instead of
8057 * allocating and copying to a large linear SKB
8058 */
8059 if (ip_tot_len) {
8060 iph->check = ip_csum;
8061 iph->tot_len = ip_tot_len;
8062 }
8063 tcph->check = tcp_csum;
8064 return tg3_tso_bug(tp, skb);
8065 }
8066
8050 /* If the workaround fails due to memory/mapping 8067 /* If the workaround fails due to memory/mapping
8051 * failure, silently drop this packet. 8068 * failure, silently drop this packet.
8052 */ 8069 */
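
The tg3_start_xmit() hunks above implement a save-and-restore fallback: header fields are mangled for the NIC's TSO engine up front, and the originals (IP checksum, total length, TCP checksum) are stashed so that, if the descriptor workaround trips later, they can be put back before the packet is handed to software GSO via tg3_tso_bug(). A condensed sketch of that idea, assuming the surrounding transmit logic (demo_ name is hypothetical):

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>

/* Returns true when the original headers were restored and the skb
 * should be segmented in software instead of by the hardware.
 */
static bool demo_tso_fallback(struct sk_buff *skb, bool would_hit_hwbug)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *tcph = tcp_hdr(skb);
	__sum16 ip_csum = iph->check, tcp_csum = tcph->check;
	__be16 ip_tot_len = iph->tot_len;

	/* ... headers are rewritten here for the TSO engine ... */

	if (would_hit_hwbug) {
		iph->check = ip_csum;		/* undo the mangling */
		iph->tot_len = ip_tot_len;
		tcph->check = tcp_csum;
		return true;			/* caller runs GSO path */
	}
	return false;
}
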
@@ -11876,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)
11876static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11893static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11877{ 11894{
11878 struct tg3 *tp = netdev_priv(dev); 11895 struct tg3 *tp = netdev_priv(dev);
11879 int ret; 11896 int ret, cpmu_restore = 0;
11880 u8 *pd; 11897 u8 *pd;
11881 u32 i, offset, len, b_offset, b_count; 11898 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11882 __be32 val; 11899 __be32 val;
11883 11900
11884 if (tg3_flag(tp, NO_NVRAM)) 11901 if (tg3_flag(tp, NO_NVRAM))
@@ -11890,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11890 11907
11891 eeprom->magic = TG3_EEPROM_MAGIC; 11908 eeprom->magic = TG3_EEPROM_MAGIC;
11892 11909
11910 /* Override clock, link aware and link idle modes */
11911 if (tg3_flag(tp, CPMU_PRESENT)) {
11912 cpmu_val = tr32(TG3_CPMU_CTRL);
11913 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11914 CPMU_CTRL_LINK_IDLE_MODE)) {
11915 tw32(TG3_CPMU_CTRL, cpmu_val &
11916 ~(CPMU_CTRL_LINK_AWARE_MODE |
11917 CPMU_CTRL_LINK_IDLE_MODE));
11918 cpmu_restore = 1;
11919 }
11920 }
11921 tg3_override_clk(tp);
11922
11893 if (offset & 3) { 11923 if (offset & 3) {
11894 /* adjustments to start on required 4 byte boundary */ 11924 /* adjustments to start on required 4 byte boundary */
11895 b_offset = offset & 3; 11925 b_offset = offset & 3;
@@ -11900,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11900 } 11930 }
11901 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 11931 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11902 if (ret) 11932 if (ret)
11903 return ret; 11933 goto eeprom_done;
11904 memcpy(data, ((char *)&val) + b_offset, b_count); 11934 memcpy(data, ((char *)&val) + b_offset, b_count);
11905 len -= b_count; 11935 len -= b_count;
11906 offset += b_count; 11936 offset += b_count;
@@ -11912,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11912 for (i = 0; i < (len - (len & 3)); i += 4) { 11942 for (i = 0; i < (len - (len & 3)); i += 4) {
11913 ret = tg3_nvram_read_be32(tp, offset + i, &val); 11943 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11914 if (ret) { 11944 if (ret) {
11945 if (i)
11946 i -= 4;
11915 eeprom->len += i; 11947 eeprom->len += i;
11916 return ret; 11948 goto eeprom_done;
11917 } 11949 }
11918 memcpy(pd + i, &val, 4); 11950 memcpy(pd + i, &val, 4);
11951 if (need_resched()) {
11952 if (signal_pending(current)) {
11953 eeprom->len += i;
11954 ret = -EINTR;
11955 goto eeprom_done;
11956 }
11957 cond_resched();
11958 }
11919 } 11959 }
11920 eeprom->len += i; 11960 eeprom->len += i;
11921 11961
@@ -11926,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11926 b_offset = offset + len - b_count; 11966 b_offset = offset + len - b_count;
11927 ret = tg3_nvram_read_be32(tp, b_offset, &val); 11967 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11928 if (ret) 11968 if (ret)
11929 return ret; 11969 goto eeprom_done;
11930 memcpy(pd, &val, b_count); 11970 memcpy(pd, &val, b_count);
11931 eeprom->len += b_count; 11971 eeprom->len += b_count;
11932 } 11972 }
11933 return 0; 11973 ret = 0;
11974
11975eeprom_done:
11976 /* Restore clock, link aware and link idle modes */
11977 tg3_restore_clk(tp);
11978 if (cpmu_restore)
11979 tw32(TG3_CPMU_CTRL, cpmu_val);
11980
11981 return ret;
11934} 11982}
11935 11983
11936static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11984static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
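
The loop change in tg3_get_eeprom() above adds a standard courtesy pattern for long register-polling loops: yield when the scheduler wants the CPU, and abort on a fatal signal so a stuck NVRAM read cannot wedge userspace. A sketch with a hypothetical word-reader callback standing in for tg3_nvram_read_be32():

#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>

static int demo_read_all(u32 *buf, u32 words,
			 int (*read_word)(u32 idx, u32 *val))
{
	u32 i;

	for (i = 0; i < words; i++) {
		int ret = read_word(i, &buf[i]);

		if (ret)
			return ret;
		if (need_resched()) {
			if (signal_pending(current))
				return -EINTR;	/* let userspace bail */
			cond_resched();
		}
	}
	return 0;
}
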
@@ -12484,7 +12532,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12484 return size; 12532 return size;
12485} 12533}
12486 12534
12487static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) 12535static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
12488{ 12536{
12489 struct tg3 *tp = netdev_priv(dev); 12537 struct tg3 *tp = netdev_priv(dev);
12490 int i; 12538 int i;
@@ -12495,7 +12543,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12495 return 0; 12543 return 0;
12496} 12544}
12497 12545
12498static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) 12546static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
12499{ 12547{
12500 struct tg3 *tp = netdev_priv(dev); 12548 struct tg3 *tp = netdev_priv(dev);
12501 size_t i; 12549 size_t i;
@@ -14027,8 +14075,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
14027 .get_sset_count = tg3_get_sset_count, 14075 .get_sset_count = tg3_get_sset_count,
14028 .get_rxnfc = tg3_get_rxnfc, 14076 .get_rxnfc = tg3_get_rxnfc,
14029 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14077 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14030 .get_rxfh_indir = tg3_get_rxfh_indir, 14078 .get_rxfh = tg3_get_rxfh,
14031 .set_rxfh_indir = tg3_set_rxfh_indir, 14079 .set_rxfh = tg3_set_rxfh,
14032 .get_channels = tg3_get_channels, 14080 .get_channels = tg3_get_channels,
14033 .set_channels = tg3_set_channels, 14081 .set_channels = tg3_set_channels,
14034 .get_ts_info = tg3_get_ts_info, 14082 .get_ts_info = tg3_get_ts_info,
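
The ethtool_ops hunk above reflects the RSS API rename from this merge: .get_rxfh_indir/.set_rxfh_indir became .get_rxfh/.set_rxfh, which carry an optional hash key next to the indirection table. Drivers without a configurable key, tg3 among them, simply ignore the key argument. A minimal sketch with hypothetical demo_ names and a made-up table size:

#include <linux/netdevice.h>
#include <linux/ethtool.h>

static u32 demo_rxfh_indir_size(struct net_device *dev)
{
	return 128;			/* illustrative table size */
}

static int demo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
	u32 i;

	for (i = 0; i < 128; i++)
		indir[i] = i % 4;	/* spread over 4 queues, say */
	/* no hardware hash key: leave *key untouched */
	return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_rxfh_indir_size	= demo_rxfh_indir_size,
	.get_rxfh		= demo_get_rxfh,
};
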
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 04321e5a356e..461accaf0aa4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2013 Broadcom Corporation. 7 * Copyright (C) 2007-2014 Broadcom Corporation.
8 */ 8 */
9 9
10#ifndef _T3_H 10#ifndef _T3_H
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index f9e150825bb5..882cad71ad62 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -266,8 +266,8 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
266 ethtool_cmd_speed_set(cmd, SPEED_10000); 266 ethtool_cmd_speed_set(cmd, SPEED_10000);
267 cmd->duplex = DUPLEX_FULL; 267 cmd->duplex = DUPLEX_FULL;
268 } else { 268 } else {
269 ethtool_cmd_speed_set(cmd, -1); 269 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
270 cmd->duplex = -1; 270 cmd->duplex = DUPLEX_UNKNOWN;
271 } 271 }
272 cmd->transceiver = XCVR_EXTERNAL; 272 cmd->transceiver = XCVR_EXTERNAL;
273 cmd->maxtxpkt = 0; 273 cmd->maxtxpkt = 0;
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
1137void 1137void
1138bnad_set_ethtool_ops(struct net_device *netdev) 1138bnad_set_ethtool_ops(struct net_device *netdev)
1139{ 1139{
1140 SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops); 1140 netdev->ethtool_ops = &bnad_ethtool_ops;
1141} 1141}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 521dfea44b83..25d6b2a10e4e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
1737 platform_set_drvdata(pdev, ndev); 1737 platform_set_drvdata(pdev, ndev);
1738 ether_setup(ndev); 1738 ether_setup(ndev);
1739 ndev->netdev_ops = &xgmac_netdev_ops; 1739 ndev->netdev_ops = &xgmac_netdev_ops;
1740 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); 1740 ndev->ethtool_ops = &xgmac_ethtool_ops;
1741 spin_lock_init(&priv->stats_lock); 1741 spin_lock_init(&priv->stats_lock);
1742 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); 1742 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
1743 1743
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 05613a85ce61..186566bfdbc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -580,8 +580,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
580 ethtool_cmd_speed_set(cmd, p->link_config.speed); 580 ethtool_cmd_speed_set(cmd, p->link_config.speed);
581 cmd->duplex = p->link_config.duplex; 581 cmd->duplex = p->link_config.duplex;
582 } else { 582 } else {
583 ethtool_cmd_speed_set(cmd, -1); 583 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
584 cmd->duplex = -1; 584 cmd->duplex = DUPLEX_UNKNOWN;
585 } 585 }
586 586
587 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 587 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1100 1100
1101 netif_napi_add(netdev, &adapter->napi, t1_poll, 64); 1101 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1102 1102
1103 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); 1103 netdev->ethtool_ops = &t1_ethtool_ops;
1104 } 1104 }
1105 1105
1106 if (t1_init_sw_modules(adapter, bi) < 0) { 1106 if (t1_init_sw_modules(adapter, bi) < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 07bbb711b7e5..5d9cce053cc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1809,8 +1809,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1809 ethtool_cmd_speed_set(cmd, p->link_config.speed); 1809 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1810 cmd->duplex = p->link_config.duplex; 1810 cmd->duplex = p->link_config.duplex;
1811 } else { 1811 } else {
1812 ethtool_cmd_speed_set(cmd, -1); 1812 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1813 cmd->duplex = -1; 1813 cmd->duplex = DUPLEX_UNKNOWN;
1814 } 1814 }
1815 1815
1816 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 1816 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3291 netdev->features |= NETIF_F_HIGHDMA; 3291 netdev->features |= NETIF_F_HIGHDMA;
3292 3292
3293 netdev->netdev_ops = &cxgb_netdev_ops; 3293 netdev->netdev_ops = &cxgb_netdev_ops;
3294 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 3294 netdev->ethtool_ops = &cxgb_ethtool_ops;
3295 } 3295 }
3296 3296
3297 pci_set_drvdata(pdev, adapter); 3297 pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index c0a9dd55f4e5..b0cbb2b7fd48 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
185 if (ether_addr_equal(dev->dev_addr, mac)) { 185 if (ether_addr_equal(dev->dev_addr, mac)) {
186 rcu_read_lock(); 186 rcu_read_lock();
187 if (vlan && vlan != VLAN_VID_MASK) { 187 if (vlan && vlan != VLAN_VID_MASK) {
188 dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan); 188 dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
189 } else if (netif_is_bond_slave(dev)) { 189 } else if (netif_is_bond_slave(dev)) {
190 struct net_device *upper_dev; 190 struct net_device *upper_dev;
191 191
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db37709263..f503dce4ab17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ 357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ 358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ 359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
360 MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
361 MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
360}; 362};
361 363
362enum { 364enum {
363 MAX_EGRQ = 128, /* max # of egress queues, including FLs */ 365 INGQ_EXTRAS = 2, /* firmware event queue and */
364 MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ 366 /* forwarded interrupts */
367 MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
368 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
369 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
370 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
365}; 371};
366 372
367struct adapter; 373struct adapter;
@@ -538,6 +544,7 @@ struct sge {
538 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; 544 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
539 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; 545 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
540 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; 546 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
547 struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
541 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; 548 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
542 549
543 struct sge_rspq intrq ____cacheline_aligned_in_smp; 550 struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
548 u16 ethtxq_rover; /* Tx queue to clean up next */ 555 u16 ethtxq_rover; /* Tx queue to clean up next */
549 u16 ofldqsets; /* # of active offload queue sets */ 556 u16 ofldqsets; /* # of active offload queue sets */
550 u16 rdmaqs; /* # of available RDMA Rx queues */ 557 u16 rdmaqs; /* # of available RDMA Rx queues */
558 u16 rdmaciqs; /* # of available RDMA concentrator IQs */
551 u16 ofld_rxq[MAX_OFLD_QSETS]; 559 u16 ofld_rxq[MAX_OFLD_QSETS];
552 u16 rdma_rxq[NCHAN]; 560 u16 rdma_rxq[NCHAN];
561 u16 rdma_ciq[NCHAN];
553 u16 timer_val[SGE_NTIMERS]; 562 u16 timer_val[SGE_NTIMERS];
554 u8 counter_val[SGE_NCOUNTERS]; 563 u8 counter_val[SGE_NCOUNTERS];
555 u32 fl_pg_order; /* large page allocation size */ 564 u32 fl_pg_order; /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
577#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) 586#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
578#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) 587#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
579#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) 588#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
589#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
580 590
581struct l2t_data; 591struct l2t_data;
582 592
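
The cxgb4.h hunk above replaces the hard-coded MAX_EGRQ/MAX_INGQ limits (128 and 64) with sums of their components, so adding a queue class, here the RDMA concentrator IQs, automatically grows the bounds. A worked check of the arithmetic, using constants this tree defines elsewhere (MAX_ETH_QSETS = 32 and NCHAN = 4; treat those values as hedged, they are not shown in this hunk):

enum {
	DEMO_ETH_QSETS   = 32,
	DEMO_OFLD_QSETS  = 16,
	DEMO_NCHAN       = 4,	/* ctrl, rdma, ciq, iscsi each use NCHAN */
	DEMO_INGQ_EXTRAS = 2,

	/* egress: txq+flq per qset, plus ctrl + rdma + iscsi: 108 */
	DEMO_MAX_EGRQ = DEMO_ETH_QSETS * 2 + DEMO_OFLD_QSETS * 2
		      + DEMO_NCHAN * 3,

	/* ingress: rspq per qset, rdma + ciq + iscsi, plus extras: 62 */
	DEMO_MAX_INGQ = DEMO_ETH_QSETS + DEMO_OFLD_QSETS
		      + DEMO_NCHAN * 3 + DEMO_INGQ_EXTRAS,
};
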
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3301e0..2f8d6b910383 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
818 for_each_rdmarxq(&adap->sge, i) 818 for_each_rdmarxq(&adap->sge, i)
819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", 819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
820 adap->port[0]->name, i); 820 adap->port[0]->name, i);
821
822 for_each_rdmaciq(&adap->sge, i)
823 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
824 adap->port[0]->name, i);
821} 825}
822 826
823static int request_msix_queue_irqs(struct adapter *adap) 827static int request_msix_queue_irqs(struct adapter *adap)
824{ 828{
825 struct sge *s = &adap->sge; 829 struct sge *s = &adap->sge;
826 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2; 830 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
831 int msi_index = 2;
827 832
828 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 833 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
829 adap->msix_info[1].desc, &s->fw_evtq); 834 adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
857 goto unwind; 862 goto unwind;
858 msi_index++; 863 msi_index++;
859 } 864 }
865 for_each_rdmaciq(s, rdmaciqqidx) {
866 err = request_irq(adap->msix_info[msi_index].vec,
867 t4_sge_intr_msix, 0,
868 adap->msix_info[msi_index].desc,
869 &s->rdmaciq[rdmaciqqidx].rspq);
870 if (err)
871 goto unwind;
872 msi_index++;
873 }
860 return 0; 874 return 0;
861 875
862unwind: 876unwind:
877 while (--rdmaciqqidx >= 0)
878 free_irq(adap->msix_info[--msi_index].vec,
879 &s->rdmaciq[rdmaciqqidx].rspq);
863 while (--rdmaqidx >= 0) 880 while (--rdmaqidx >= 0)
864 free_irq(adap->msix_info[--msi_index].vec, 881 free_irq(adap->msix_info[--msi_index].vec,
865 &s->rdmarxq[rdmaqidx].rspq); 882 &s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
885 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); 902 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
886 for_each_rdmarxq(s, i) 903 for_each_rdmarxq(s, i)
887 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); 904 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
905 for_each_rdmaciq(s, i)
906 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
888} 907}
889 908
890/** 909/**
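
The request_msix_queue_irqs() hunks above extend a classic unwind idiom: every successfully requested vector gets a mirrored free_irq() in the error path, executed in reverse order when a later request fails, so no vector leaks. Stripped of the cxgb4 specifics, the shape is (demo_ names hypothetical):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_request_all(int *vecs, void **cookies, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = request_irq(vecs[i], demo_handler, 0, "demo",
				  cookies[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* release in reverse order */
		free_irq(vecs[i], cookies[i]);
	return err;
}
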
@@ -1047,7 +1066,8 @@ freeout: t4_free_sge_resources(adap);
1047 if (msi_idx > 0) 1066 if (msi_idx > 0)
1048 msi_idx++; 1067 msi_idx++;
1049 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, 1068 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1050 &q->fl, uldrx_handler); 1069 q->fl.size ? &q->fl : NULL,
1070 uldrx_handler);
1051 if (err) 1071 if (err)
1052 goto freeout; 1072 goto freeout;
1053 memset(&q->stats, 0, sizeof(q->stats)); 1073 memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout: t4_free_sge_resources(adap);
1064 if (msi_idx > 0) 1084 if (msi_idx > 0)
1065 msi_idx++; 1085 msi_idx++;
1066 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], 1086 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1067 msi_idx, &q->fl, uldrx_handler); 1087 msi_idx, q->fl.size ? &q->fl : NULL,
1088 uldrx_handler);
1068 if (err) 1089 if (err)
1069 goto freeout; 1090 goto freeout;
1070 memset(&q->stats, 0, sizeof(q->stats)); 1091 memset(&q->stats, 0, sizeof(q->stats));
1071 s->rdma_rxq[i] = q->rspq.abs_id; 1092 s->rdma_rxq[i] = q->rspq.abs_id;
1072 } 1093 }
1073 1094
1095 for_each_rdmaciq(s, i) {
1096 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1097
1098 if (msi_idx > 0)
1099 msi_idx++;
1100 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1101 msi_idx, q->fl.size ? &q->fl : NULL,
1102 uldrx_handler);
1103 if (err)
1104 goto freeout;
1105 memset(&q->stats, 0, sizeof(q->stats));
1106 s->rdma_ciq[i] = q->rspq.abs_id;
1107 }
1108
1074 for_each_port(adap, i) { 1109 for_each_port(adap, i) {
1075 /* 1110 /*
1076 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't 1111 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2252,12 +2287,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2252 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || 2287 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2253 p->port_type == FW_PORT_TYPE_FIBER_XAUI) 2288 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2254 cmd->port = PORT_FIBRE; 2289 cmd->port = PORT_FIBRE;
2255 else if (p->port_type == FW_PORT_TYPE_SFP) { 2290 else if (p->port_type == FW_PORT_TYPE_SFP ||
2256 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || 2291 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2257 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) 2292 p->port_type == FW_PORT_TYPE_QSFP) {
2293 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2294 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2295 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2296 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2297 cmd->port = PORT_FIBRE;
2298 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2299 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2258 cmd->port = PORT_DA; 2300 cmd->port = PORT_DA;
2259 else 2301 else
2260 cmd->port = PORT_FIBRE; 2302 cmd->port = PORT_OTHER;
2261 } else 2303 } else
2262 cmd->port = PORT_OTHER; 2304 cmd->port = PORT_OTHER;
2263 2305
@@ -2461,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
2461} 2503}
2462 2504
2463/** 2505/**
2464 * set_rxq_intr_params - set a queue's interrupt holdoff parameters 2506 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2465 * @adap: the adapter
2466 * @q: the Rx queue 2507 * @q: the Rx queue
2467 * @us: the hold-off time in us, or 0 to disable timer 2508 * @us: the hold-off time in us, or 0 to disable timer
2468 * @cnt: the hold-off packet count, or 0 to disable counter 2509 * @cnt: the hold-off packet count, or 0 to disable counter
@@ -2470,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
2470 * Sets an Rx queue's interrupt hold-off time and packet count. At least 2511 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2471 * one of the two needs to be enabled for the queue to generate interrupts. 2512 * one of the two needs to be enabled for the queue to generate interrupts.
2472 */ 2513 */
2473static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, 2514static int set_rspq_intr_params(struct sge_rspq *q,
2474 unsigned int us, unsigned int cnt) 2515 unsigned int us, unsigned int cnt)
2475{ 2516{
2517 struct adapter *adap = q->adap;
2518
2476 if ((us | cnt) == 0) 2519 if ((us | cnt) == 0)
2477 cnt = 1; 2520 cnt = 1;
2478 2521
@@ -2499,24 +2542,34 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2499 return 0; 2542 return 0;
2500} 2543}
2501 2544
2502static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2545/**
 2546 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2547 * @dev: the network device
2548 * @us: the hold-off time in us, or 0 to disable timer
2549 * @cnt: the hold-off packet count, or 0 to disable counter
2550 *
2551 * Set the RX interrupt hold-off parameters for a network device.
2552 */
2553static int set_rx_intr_params(struct net_device *dev,
2554 unsigned int us, unsigned int cnt)
2503{ 2555{
2504 const struct port_info *pi = netdev_priv(dev); 2556 int i, err;
2557 struct port_info *pi = netdev_priv(dev);
2505 struct adapter *adap = pi->adapter; 2558 struct adapter *adap = pi->adapter;
2506 struct sge_rspq *q; 2559 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2507 int i; 2560
2508 int r = 0; 2561 for (i = 0; i < pi->nqsets; i++, q++) {
2509 2562 err = set_rspq_intr_params(&q->rspq, us, cnt);
2510 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) { 2563 if (err)
2511 q = &adap->sge.ethrxq[i].rspq; 2564 return err;
2512 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2513 c->rx_max_coalesced_frames);
2514 if (r) {
2515 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2516 break;
2517 }
2518 } 2565 }
2519 return r; 2566 return 0;
2567}
2568
2569static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2570{
2571 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2572 c->rx_max_coalesced_frames);
2520} 2573}
2521 2574
2522static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2575static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -2732,7 +2785,7 @@ static u32 get_rss_table_size(struct net_device *dev)
2732 return pi->rss_size; 2785 return pi->rss_size;
2733} 2786}
2734 2787
2735static int get_rss_table(struct net_device *dev, u32 *p) 2788static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
2736{ 2789{
2737 const struct port_info *pi = netdev_priv(dev); 2790 const struct port_info *pi = netdev_priv(dev);
2738 unsigned int n = pi->rss_size; 2791 unsigned int n = pi->rss_size;
@@ -2742,7 +2795,7 @@ static int get_rss_table(struct net_device *dev, u32 *p)
2742 return 0; 2795 return 0;
2743} 2796}
2744 2797
2745static int set_rss_table(struct net_device *dev, const u32 *p) 2798static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
2746{ 2799{
2747 unsigned int i; 2800 unsigned int i;
2748 struct port_info *pi = netdev_priv(dev); 2801 struct port_info *pi = netdev_priv(dev);
@@ -2844,8 +2897,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
2844 .set_wol = set_wol, 2897 .set_wol = set_wol,
2845 .get_rxnfc = get_rxnfc, 2898 .get_rxnfc = get_rxnfc,
2846 .get_rxfh_indir_size = get_rss_table_size, 2899 .get_rxfh_indir_size = get_rss_table_size,
2847 .get_rxfh_indir = get_rss_table, 2900 .get_rxfh = get_rss_table,
2848 .set_rxfh_indir = set_rss_table, 2901 .set_rxfh = set_rss_table,
2849 .flash_device = set_flash, 2902 .flash_device = set_flash,
2850}; 2903};
2851 2904
@@ -3386,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3386EXPORT_SYMBOL(cxgb4_best_mtu); 3439EXPORT_SYMBOL(cxgb4_best_mtu);
3387 3440
3388/** 3441/**
3442 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3443 * @mtus: the HW MTU table
3444 * @header_size: Header Size
3445 * @data_size_max: maximum Data Segment Size
3446 * @data_size_align: desired Data Segment Size Alignment (2^N)
3447 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3448 *
3449 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3450 * MTU Table based solely on a Maximum MTU parameter, we break that
3451 * parameter up into a Header Size and Maximum Data Segment Size, and
3452 * provide a desired Data Segment Size Alignment. If we find an MTU in
3453 * the Hardware MTU Table which will result in a Data Segment Size with
3454 * the requested alignment _and_ that MTU isn't "too far" from the
3455 * closest MTU, then we'll return that rather than the closest MTU.
3456 */
3457unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3458 unsigned short header_size,
3459 unsigned short data_size_max,
3460 unsigned short data_size_align,
3461 unsigned int *mtu_idxp)
3462{
3463 unsigned short max_mtu = header_size + data_size_max;
3464 unsigned short data_size_align_mask = data_size_align - 1;
3465 int mtu_idx, aligned_mtu_idx;
3466
3467 /* Scan the MTU Table till we find an MTU which is larger than our
3468 * Maximum MTU or we reach the end of the table. Along the way,
3469 * record the last MTU found, if any, which will result in a Data
3470 * Segment Length matching the requested alignment.
3471 */
3472 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3473 unsigned short data_size = mtus[mtu_idx] - header_size;
3474
3475 /* If this MTU minus the Header Size would result in a
3476 * Data Segment Size of the desired alignment, remember it.
3477 */
3478 if ((data_size & data_size_align_mask) == 0)
3479 aligned_mtu_idx = mtu_idx;
3480
3481 /* If we're not at the end of the Hardware MTU Table and the
3482 * next element is larger than our Maximum MTU, drop out of
3483 * the loop.
3484 */
3485 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3486 break;
3487 }
3488
3489 /* If we fell out of the loop because we ran to the end of the table,
3490 * then we just have to use the last [largest] entry.
3491 */
3492 if (mtu_idx == NMTUS)
3493 mtu_idx--;
3494
3495 /* If we found an MTU which resulted in the requested Data Segment
3496 * Length alignment and that's "not far" from the largest MTU which is
3497 * less than or equal to the maximum MTU, then use that.
3498 */
3499 if (aligned_mtu_idx >= 0 &&
3500 mtu_idx - aligned_mtu_idx <= 1)
3501 mtu_idx = aligned_mtu_idx;
3502
3503 /* If the caller has passed in an MTU Index pointer, pass the
3504 * MTU Index back. Return the MTU value.
3505 */
3506 if (mtu_idxp)
3507 *mtu_idxp = mtu_idx;
3508 return mtus[mtu_idx];
3509}
3510EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3511
3512/**
3389 * cxgb4_port_chan - get the HW channel of a port 3513 * cxgb4_port_chan - get the HW channel of a port
3390 * @dev: the net device for the port 3514 * @dev: the net device for the port
3391 * 3515 *
@@ -3782,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3782 lli.mtus = adap->params.mtus; 3906 lli.mtus = adap->params.mtus;
3783 if (uld == CXGB4_ULD_RDMA) { 3907 if (uld == CXGB4_ULD_RDMA) {
3784 lli.rxq_ids = adap->sge.rdma_rxq; 3908 lli.rxq_ids = adap->sge.rdma_rxq;
3909 lli.ciq_ids = adap->sge.rdma_ciq;
3785 lli.nrxq = adap->sge.rdmaqs; 3910 lli.nrxq = adap->sge.rdmaqs;
3911 lli.nciq = adap->sge.rdmaciqs;
3786 } else if (uld == CXGB4_ULD_ISCSI) { 3912 } else if (uld == CXGB4_ULD_ISCSI) {
3787 lli.rxq_ids = adap->sge.ofld_rxq; 3913 lli.rxq_ids = adap->sge.ofld_rxq;
3788 lli.nrxq = adap->sge.ofldqsets; 3914 lli.nrxq = adap->sge.ofldqsets;
@@ -4061,7 +4187,7 @@ static int update_root_dev_clip(struct net_device *dev)
4061 4187
4062 /* Parse all bond and vlan devices layered on top of the physical dev */ 4188 /* Parse all bond and vlan devices layered on top of the physical dev */
4063 for (i = 0; i < VLAN_N_VID; i++) { 4189 for (i = 0; i < VLAN_N_VID; i++) {
4064 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i); 4190 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4065 if (!root_dev) 4191 if (!root_dev)
4066 continue; 4192 continue;
4067 4193
@@ -5528,13 +5654,41 @@ static int adap_init0(struct adapter *adap)
5528#undef FW_PARAM_PFVF 5654#undef FW_PARAM_PFVF
5529#undef FW_PARAM_DEV 5655#undef FW_PARAM_DEV
5530 5656
5531 /* 5657 /* The MTU/MSS Table is initialized by now, so load their values. If
5532 * These are finalized by FW initialization, load their values now. 5658 * we're initializing the adapter, then we'll make any modifications
5659 * we want to the MTU/MSS Table and also initialize the congestion
5660 * parameters.
5533 */ 5661 */
5534 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5662 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5535 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5663 if (state != DEV_STATE_INIT) {
5536 adap->params.b_wnd); 5664 int i;
5665
5666 /* The default MTU Table contains values 1492 and 1500.
5667 * However, for TCP, it's better to have two values which are
5668 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5669 * This allows us to have a TCP Data Payload which is a
5670 * multiple of 8 regardless of what combination of TCP Options
5671 * are in use (always a multiple of 4 bytes) which is
5672 * important for performance reasons. For instance, if no
5673 * options are in use, then we have a 20-byte IP header and a
5674 * 20-byte TCP header. In this case, a 1500-byte MSS would
5675 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5676 * which is not a multiple of 8. So using an MSS of 1488 in
5677 * this case results in a TCP Data Payload of 1448 bytes which
5678 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5679 * Stamps have been negotiated, then an MTU of 1500 bytes
5680 * results in a TCP Data Payload of 1448 bytes which, as
5681 * above, is a multiple of 8 bytes ...
5682 */
5683 for (i = 0; i < NMTUS; i++)
5684 if (adap->params.mtus[i] == 1492) {
5685 adap->params.mtus[i] = 1488;
5686 break;
5687 }
5537 5688
5689 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5690 adap->params.b_wnd);
5691 }
5538 t4_init_tp_params(adap); 5692 t4_init_tp_params(adap);
5539 adap->flags |= FW_OK; 5693 adap->flags |= FW_OK;
5540 return 0; 5694 return 0;
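
The long comment above justifies swapping 1492 for 1488 in the MTU table; the arithmetic is small enough to verify directly (plain userspace C):

#include <assert.h>

int main(void)
{
	assert((1500 - 40) % 8 == 4);	/* 1460-byte payload: misaligned */
	assert((1488 - 40) % 8 == 0);	/* 1448: multiple of 8 */
	assert((1500 - 52) % 8 == 0);	/* 1448 with 12B TCP timestamps */
	return 0;
}
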
@@ -5669,12 +5823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
5669 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; 5823 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
5670} 5824}
5671 5825
5672static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, 5826static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5827 unsigned int us, unsigned int cnt,
5673 unsigned int size, unsigned int iqe_size) 5828 unsigned int size, unsigned int iqe_size)
5674{ 5829{
5675 q->intr_params = QINTR_TIMER_IDX(timer_idx) | 5830 q->adap = adap;
5676 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); 5831 set_rspq_intr_params(q, us, cnt);
5677 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5678 q->iqe_len = iqe_size; 5832 q->iqe_len = iqe_size;
5679 q->size = size; 5833 q->size = size;
5680} 5834}
@@ -5688,6 +5842,7 @@ static void cfg_queues(struct adapter *adap)
5688{ 5842{
5689 struct sge *s = &adap->sge; 5843 struct sge *s = &adap->sge;
5690 int i, q10g = 0, n10g = 0, qidx = 0; 5844 int i, q10g = 0, n10g = 0, qidx = 0;
5845 int ciq_size;
5691 5846
5692 for_each_port(adap, i) 5847 for_each_port(adap, i)
5693 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); 5848 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5726,12 +5881,13 @@ static void cfg_queues(struct adapter *adap)
5726 s->ofldqsets = adap->params.nports; 5881 s->ofldqsets = adap->params.nports;
5727 /* For RDMA one Rx queue per channel suffices */ 5882 /* For RDMA one Rx queue per channel suffices */
5728 s->rdmaqs = adap->params.nports; 5883 s->rdmaqs = adap->params.nports;
5884 s->rdmaciqs = adap->params.nports;
5729 } 5885 }
5730 5886
5731 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { 5887 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5732 struct sge_eth_rxq *r = &s->ethrxq[i]; 5888 struct sge_eth_rxq *r = &s->ethrxq[i];
5733 5889
5734 init_rspq(&r->rspq, 0, 0, 1024, 64); 5890 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5735 r->fl.size = 72; 5891 r->fl.size = 72;
5736 } 5892 }
5737 5893
@@ -5747,7 +5903,7 @@ static void cfg_queues(struct adapter *adap)
5747 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { 5903 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5748 struct sge_ofld_rxq *r = &s->ofldrxq[i]; 5904 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5749 5905
5750 init_rspq(&r->rspq, 0, 0, 1024, 64); 5906 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
5751 r->rspq.uld = CXGB4_ULD_ISCSI; 5907 r->rspq.uld = CXGB4_ULD_ISCSI;
5752 r->fl.size = 72; 5908 r->fl.size = 72;
5753 } 5909 }
@@ -5755,13 +5911,26 @@ static void cfg_queues(struct adapter *adap)
5755 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { 5911 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5756 struct sge_ofld_rxq *r = &s->rdmarxq[i]; 5912 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5757 5913
5758 init_rspq(&r->rspq, 0, 0, 511, 64); 5914 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
5759 r->rspq.uld = CXGB4_ULD_RDMA; 5915 r->rspq.uld = CXGB4_ULD_RDMA;
5760 r->fl.size = 72; 5916 r->fl.size = 72;
5761 } 5917 }
5762 5918
5763 init_rspq(&s->fw_evtq, 6, 0, 512, 64); 5919 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5764 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); 5920 if (ciq_size > SGE_MAX_IQ_SIZE) {
5921 CH_WARN(adap, "CIQ size too small for available IQs\n");
5922 ciq_size = SGE_MAX_IQ_SIZE;
5923 }
5924
5925 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5926 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5927
5928 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
5929 r->rspq.uld = CXGB4_ULD_RDMA;
5930 }
5931
5932 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5933 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
5765} 5934}
5766 5935
5767/* 5936/*
@@ -5808,9 +5977,9 @@ static int enable_msix(struct adapter *adap)
 
 	want = s->max_ethqsets + EXTRA_VECS;
 	if (is_offload(adap)) {
-		want += s->rdmaqs + s->ofldqsets;
+		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
 		/* need nchan for each possible ULD */
-		ofld_need = 2 * nchan;
+		ofld_need = 3 * nchan;
 	}
 	need = adap->params.nports + EXTRA_VECS + ofld_need;
 
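want now counts the concentrator IQs (s->rdmaciqs), so the per-channel offload reservation grows from 2 to 3 vectors. An illustrative sketch of the want/need negotiation this feeds into, using pci_enable_msix_range() semantics (the driver's exact call sequence is not shown in this diff):

/* Ask for up to "want" vectors but accept as few as "need"; the
 * function returns the number of vectors obtained, or a negative
 * errno if even the minimum could not be allocated.
 */
static int example_alloc_vectors(struct pci_dev *pdev,
				 struct msix_entry *entries,
				 int need, int want)
{
	return pci_enable_msix_range(pdev, entries, need, want);
}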
@@ -6076,7 +6245,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netdev->priv_flags |= IFF_UNICAST_FLT;
 
 		netdev->netdev_ops = &cxgb4_netdev_ops;
-		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+		netdev->ethtool_ops = &cxgb_ethtool_ops;
 	}
 
 	pci_set_drvdata(pdev, adapter);
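This SET_ETHTOOL_OPS() to direct-assignment conversion repeats across many drivers in this merge. The removed macro was only a thin wrapper, roughly:

/* from include/linux/netdevice.h (pre-removal), approximately: */
#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

so each call site becomes the plain assignment seen above, with no behavioral change.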
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a047528f..55e9daf7f9d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
 	const struct cxgb4_virt_res *vr;     /* assorted HW resources */
 	const unsigned short *mtus;          /* MTU table */
 	const unsigned short *rxq_ids;       /* the ULD's Rx queue ids */
+	const unsigned short *ciq_ids;       /* the ULD's concentrator IQ ids */
 	unsigned short nrxq;                 /* # of Rx queues */
 	unsigned short ntxq;                 /* # of Tx queues */
+	unsigned short nciq;                 /* # of concentrator IQ */
 	unsigned char nchan:4;               /* # of channels */
 	unsigned char nports:4;              /* # of ports */
 	unsigned char wr_cred;               /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
 unsigned int cxgb4_port_idx(const struct net_device *dev);
 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
 			    unsigned int *idx);
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+				    unsigned short header_size,
+				    unsigned short data_size_max,
+				    unsigned short data_size_align,
+				    unsigned int *mtu_idxp);
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
 			 struct tp_tcp_stats *v6);
 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
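Only the prototype of cxgb4_best_aligned_mtu() appears here. A hedged sketch of the idea the name suggests, reconstructed from the parameter list rather than from the driver's actual body: scan the ascending hardware MTU table and prefer the largest entry whose payload, after subtracting header_size and capping at data_size_max, is a multiple of data_size_align (NMTUS is the cxgb4 MTU-table length):

static unsigned int best_aligned_mtu_sketch(const unsigned short *mtus,
					    unsigned short header_size,
					    unsigned short data_size_max,
					    unsigned short data_size_align,
					    unsigned int *mtu_idxp)
{
	unsigned int i, best = 0;
	unsigned short max_mtu = header_size + data_size_max;

	for (i = 0; i < NMTUS && mtus[i] <= max_mtu; i++) {
		if (mtus[i] <= header_size)
			continue;
		/* prefer larger MTUs whose payload stays aligned */
		if ((mtus[i] - header_size) % data_size_align == 0)
			best = i;
	}
	if (mtu_idxp)
		*mtu_idxp = best;
	return mtus[best];
}

The real helper very likely also handles the no-aligned-candidate case; this sketch simply falls back to the first table entry.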
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e249528c8e60..dd4355d248e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 		return handle_trace_pkt(q->adap, si);
 
 	pkt = (const struct cpl_rx_pkt *)rsp;
-	csum_ok = pkt->csum_calc && !pkt->err_vec;
+	csum_ok = pkt->csum_calc && !pkt->err_vec &&
+		  (q->netdev->features & NETIF_F_RXCSUM);
 	if ((pkt->l2info & htonl(RXF_TCP)) &&
 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
 		do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 
 	rxq->stats.pkts++;
 
-	if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
-	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+	if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
 		if (!pkt->ip_frag) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			rxq->stats.rx_cso++;
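Taken together, these two hunks fold the NETIF_F_RXCSUM feature test into csum_ok itself, so a single predicate now gates both the GRO fast path and the CHECKSUM_UNNECESSARY marking; turning rx checksum offload off via ethtool therefore also keeps packets out of do_gro():

/* one predicate, shared by both paths (from the hunks above) */
csum_ok = pkt->csum_calc && !pkt->err_vec &&
	  (q->netdev->features & NETIF_F_RXCSUM);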
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	iq->cntxt_id = ntohs(c.iqid);
 	iq->abs_id = ntohs(c.physiqid);
 	iq->size--;                           /* subtract status entry */
-	iq->adap = adap;
 	iq->netdev = dev;
 	iq->handler = hnd;
 
@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
 		if (oq->rspq.desc)
 			free_rspq_fl(adap, &oq->rspq, &oq->fl);
 	}
+	for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
+		if (oq->rspq.desc)
+			free_rspq_fl(adap, &oq->rspq, &oq->fl);
+	}
 
 	/* clean up offload Tx queues */
 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623be9f1e..71b799b5b0f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
 	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
 	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
 	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+	SGE_MAX_IQ_SIZE = 65520,
 
 	SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
 	SGE_TIMER_UPD_CIDX = 7,   /* update cidx only */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f2738c710789..973eb11aa98a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
 #define DELACK(x)     ((x) << 5)
 #define ULP_MODE(x)   ((x) << 8)
 #define RCV_BUFSIZ(x) ((x) << 12)
+#define RCV_BUFSIZ_MASK 0x3FFU
 #define DSCP(x)       ((x) << 22)
 #define SMAC_SEL(x)   ((u64)(x) << 28)
 #define L2T_IDX(x)    ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
 	__be64 opt0;
 };
 
+struct cpl_t5_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be64 opt0;
+	__be32 iss;
+	__be32 rsvd;
+};
+
 struct cpl_act_open_req {
 	WR_HDR;
 	union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 52859288de7b..ff1cdd1788b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 		netdev->priv_flags |= IFF_UNICAST_FLT;
 
 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
-		SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
 
 		/*
 		 * Initialize the hardware/software state for the port.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9d88c1d50b49..bdfa80ca5e31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
 	struct sk_buff *skb;
 	const struct cpl_rx_pkt *pkt = (void *)rsp;
-	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+		       (rspq->netdev->features & NETIF_F_RXCSUM);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
 	/*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	skb_record_rx_queue(skb, rspq->idx);
 	rxq->stats.pkts++;
 
-	if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
-	    !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+	if (csum_ok && !pkt->err_vec &&
+	    (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
 		if (!pkt->ip_frag)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else {
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e35c8e0202ad..14f465f239d6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
43#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 43#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
44#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 44#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
45 45
46#define ENIC_AIC_LARGE_PKT_DIFF 3
47
46struct enic_msix_entry { 48struct enic_msix_entry {
47 int requested; 49 int requested;
48 char devname[IFNAMSIZ]; 50 char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
50 void *devid; 52 void *devid;
51}; 53};
52 54
55/* Store only the lower range. Higher range is given by fw. */
56struct enic_intr_mod_range {
57 u32 small_pkt_range_start;
58 u32 large_pkt_range_start;
59};
60
61struct enic_intr_mod_table {
62 u32 rx_rate;
63 u32 range_percent;
64};
65
66#define ENIC_MAX_LINK_SPEEDS 3
67#define ENIC_LINK_SPEED_10G 10000
68#define ENIC_LINK_SPEED_4G 4000
69#define ENIC_LINK_40G_INDEX 2
70#define ENIC_LINK_10G_INDEX 1
71#define ENIC_LINK_4G_INDEX 0
72#define ENIC_RX_COALESCE_RANGE_END 125
73#define ENIC_AIC_TS_BREAK 100
74
75struct enic_rx_coal {
76 u32 small_pkt_range_start;
77 u32 large_pkt_range_start;
78 u32 range_end;
79 u32 use_adaptive_rx_coalesce;
80};
81
53/* priv_flags */ 82/* priv_flags */
54#define ENIC_SRIOV_ENABLED (1 << 0) 83#define ENIC_SRIOV_ENABLED (1 << 0)
55 84
@@ -85,13 +114,12 @@ struct enic {
 	u32 msg_enable;
 	spinlock_t devcmd_lock;
 	u8 mac_addr[ETH_ALEN];
-	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
-	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
 	unsigned int flags;
 	unsigned int priv_flags;
 	unsigned int mc_count;
 	unsigned int uc_count;
 	u32 port_mtu;
+	struct enic_rx_coal rx_coalesce_setting;
 	u32 rx_coalesce_usecs;
 	u32 tx_coalesce_usecs;
 #ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 4b6e5695b263..3e27df522847 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -88,7 +88,7 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
 	return err;
 }
 
-int enic_dev_add_addr(struct enic *enic, u8 *addr)
+int enic_dev_add_addr(struct enic *enic, const u8 *addr)
 {
 	int err;
 
@@ -99,7 +99,7 @@ int enic_dev_add_addr(struct enic *enic, u8 *addr)
 	return err;
 }
 
-int enic_dev_del_addr(struct enic *enic, u8 *addr)
+int enic_dev_del_addr(struct enic *enic, const u8 *addr)
 {
 	int err;
 
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 129b14a4efb0..36ea1ab25f6a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -45,8 +45,8 @@ int enic_dev_add_station_addr(struct enic *enic);
 int enic_dev_del_station_addr(struct enic *enic);
 int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
 			   int broadcast, int promisc, int allmulti);
-int enic_dev_add_addr(struct enic *enic, u8 *addr);
-int enic_dev_del_addr(struct enic *enic, u8 *addr);
+int enic_dev_add_addr(struct enic *enic, const u8 *addr);
+int enic_dev_del_addr(struct enic *enic, const u8 *addr);
 int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
 int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
 int enic_dev_notify_unset(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 47e3562f4866..2e50b5489d20 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); 79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); 80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
81 81
82void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
83{
84 int i;
85 int intr;
86
87 for (i = 0; i < enic->rq_count; i++) {
88 intr = enic_msix_rq_intr(enic, i);
89 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
90 }
91}
92
82static int enic_get_settings(struct net_device *netdev, 93static int enic_get_settings(struct net_device *netdev,
83 struct ethtool_cmd *ecmd) 94 struct ethtool_cmd *ecmd)
84{ 95{
@@ -93,8 +104,8 @@ static int enic_get_settings(struct net_device *netdev,
 		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = AUTONEG_DISABLE;
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
178 struct ethtool_coalesce *ecmd) 189 struct ethtool_coalesce *ecmd)
179{ 190{
180 struct enic *enic = netdev_priv(netdev); 191 struct enic *enic = netdev_priv(netdev);
192 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
181 193
182 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; 194 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
183 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; 195 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
196 if (rxcoal->use_adaptive_rx_coalesce)
197 ecmd->use_adaptive_rx_coalesce = 1;
198 ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
199 ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
184 200
185 return 0; 201 return 0;
186} 202}
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
 	struct enic *enic = netdev_priv(netdev);
 	u32 tx_coalesce_usecs;
 	u32 rx_coalesce_usecs;
+	u32 rx_coalesce_usecs_low;
+	u32 rx_coalesce_usecs_high;
+	u32 coalesce_usecs_max;
 	unsigned int i, intr;
+	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
+	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
 	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
-		vnic_dev_get_intr_coal_timer_max(enic->vdev));
+				  coalesce_usecs_max);
 	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
-		vnic_dev_get_intr_coal_timer_max(enic->vdev));
+				  coalesce_usecs_max);
+
+	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
+				      coalesce_usecs_max);
+	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
+				       coalesce_usecs_max);
 
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	case VNIC_DEV_INTR_MODE_INTX:
 		if (tx_coalesce_usecs != rx_coalesce_usecs)
 			return -EINVAL;
+		if (ecmd->use_adaptive_rx_coalesce	||
+		    ecmd->rx_coalesce_usecs_low		||
+		    ecmd->rx_coalesce_usecs_high)
+			return -EOPNOTSUPP;
 
 		intr = enic_legacy_io_intr();
 		vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
210 case VNIC_DEV_INTR_MODE_MSI: 240 case VNIC_DEV_INTR_MODE_MSI:
211 if (tx_coalesce_usecs != rx_coalesce_usecs) 241 if (tx_coalesce_usecs != rx_coalesce_usecs)
212 return -EINVAL; 242 return -EINVAL;
243 if (ecmd->use_adaptive_rx_coalesce ||
244 ecmd->rx_coalesce_usecs_low ||
245 ecmd->rx_coalesce_usecs_high)
246 return -EOPNOTSUPP;
213 247
214 vnic_intr_coalescing_timer_set(&enic->intr[0], 248 vnic_intr_coalescing_timer_set(&enic->intr[0],
215 tx_coalesce_usecs); 249 tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
 				tx_coalesce_usecs);
 		}
 
-		for (i = 0; i < enic->rq_count; i++) {
-			intr = enic_msix_rq_intr(enic, i);
-			vnic_intr_coalescing_timer_set(&enic->intr[intr],
-				rx_coalesce_usecs);
+		if (rxcoal->use_adaptive_rx_coalesce) {
+			if (!ecmd->use_adaptive_rx_coalesce) {
+				rxcoal->use_adaptive_rx_coalesce = 0;
+				enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+			}
+		} else {
+			if (ecmd->use_adaptive_rx_coalesce)
+				rxcoal->use_adaptive_rx_coalesce = 1;
+			else
+				enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
 		}
 
+		if (ecmd->rx_coalesce_usecs_high) {
+			if (rx_coalesce_usecs_high <
+			    (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+				return -EINVAL;
+			rxcoal->range_end = rx_coalesce_usecs_high;
+			rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+			rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+							ENIC_AIC_LARGE_PKT_DIFF;
+		}
 		break;
 	default:
 		break;
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
 
 void enic_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+	netdev->ethtool_ops = &enic_ethtool_ops;
 }
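Worked example of the new validation above: with ENIC_AIC_LARGE_PKT_DIFF = 3, a request of rx_coalesce_usecs_low = 10 and rx_coalesce_usecs_high = 80 passes the check (80 >= 10 + 3) and yields small_pkt_range_start = 10, large_pkt_range_start = 13 and range_end = 80; a high value of 12 would fail 12 >= 10 + 3 and be rejected with -EINVAL.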
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2945718ce806..f32f828b7f3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -38,6 +38,7 @@
38#include <linux/rtnetlink.h> 38#include <linux/rtnetlink.h>
39#include <linux/prefetch.h> 39#include <linux/prefetch.h>
40#include <net/ip6_checksum.h> 40#include <net/ip6_checksum.h>
41#include <linux/ktime.h>
41 42
42#include "cq_enet_desc.h" 43#include "cq_enet_desc.h"
43#include "vnic_dev.h" 44#include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
72MODULE_VERSION(DRV_VERSION); 73MODULE_VERSION(DRV_VERSION);
73MODULE_DEVICE_TABLE(pci, enic_id_table); 74MODULE_DEVICE_TABLE(pci, enic_id_table);
74 75
76#define ENIC_LARGE_PKT_THRESHOLD 1000
77#define ENIC_MAX_COALESCE_TIMERS 10
78/* Interrupt moderation table, which will be used to decide the
79 * coalescing timer values
80 * {rx_rate in Mbps, mapping percentage of the range}
81 */
82struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
83 {4000, 0},
84 {4400, 10},
85 {5060, 20},
86 {5230, 30},
87 {5540, 40},
88 {5820, 50},
89 {6120, 60},
90 {6435, 70},
91 {6745, 80},
92 {7000, 90},
93 {0xFFFFFFFF, 100}
94};
95
96/* This table helps the driver to pick different ranges for rx coalescing
97 * timer depending on the link speed.
98 */
99struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
100 {0, 0}, /* 0 - 4 Gbps */
101 {0, 3}, /* 4 - 10 Gbps */
102 {3, 6}, /* 10 - 40 Gbps */
103};
104
75int enic_is_dynamic(struct enic *enic) 105int enic_is_dynamic(struct enic *enic)
76{ 106{
77 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; 107 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
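Reading the two tables together: if the measured rate is 5000 Mbps, the lookup stops at the first mod_table entry whose rx_rate exceeds it ({5060, 20}), so 20% of the configured range is used. On a 10-40 Gbps link the default range starts are {3, 6} with range_end = 125 (ENIC_RX_COALESCE_RANGE_END), so large-packet-dominated traffic starts from 6 and gets a timer of 6 + (125 - 6) * 20 / 100 = 29 us before damping.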
@@ -586,8 +616,71 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
586 return net_stats; 616 return net_stats;
587} 617}
588 618
619static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
620{
621 struct enic *enic = netdev_priv(netdev);
622
623 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
624 unsigned int mc_count = netdev_mc_count(netdev);
625
626 netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
627 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
628
629 return -ENOSPC;
630 }
631
632 enic_dev_add_addr(enic, mc_addr);
633 enic->mc_count++;
634
635 return 0;
636}
637
638static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
639{
640 struct enic *enic = netdev_priv(netdev);
641
642 enic_dev_del_addr(enic, mc_addr);
643 enic->mc_count--;
644
645 return 0;
646}
647
648static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
649{
650 struct enic *enic = netdev_priv(netdev);
651
652 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
653 unsigned int uc_count = netdev_uc_count(netdev);
654
655 netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
656 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
657
658 return -ENOSPC;
659 }
660
661 enic_dev_add_addr(enic, uc_addr);
662 enic->uc_count++;
663
664 return 0;
665}
666
667static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
668{
669 struct enic *enic = netdev_priv(netdev);
670
671 enic_dev_del_addr(enic, uc_addr);
672 enic->uc_count--;
673
674 return 0;
675}
676
589void enic_reset_addr_lists(struct enic *enic) 677void enic_reset_addr_lists(struct enic *enic)
590{ 678{
679 struct net_device *netdev = enic->netdev;
680
681 __dev_uc_unsync(netdev, NULL);
682 __dev_mc_unsync(netdev, NULL);
683
591 enic->mc_count = 0; 684 enic->mc_count = 0;
592 enic->uc_count = 0; 685 enic->uc_count = 0;
593 enic->flags = 0; 686 enic->flags = 0;
@@ -654,112 +747,6 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
654 return enic_dev_add_station_addr(enic); 747 return enic_dev_add_station_addr(enic);
655} 748}
656 749
657static void enic_update_multicast_addr_list(struct enic *enic)
658{
659 struct net_device *netdev = enic->netdev;
660 struct netdev_hw_addr *ha;
661 unsigned int mc_count = netdev_mc_count(netdev);
662 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
663 unsigned int i, j;
664
665 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
666 netdev_warn(netdev, "Registering only %d out of %d "
667 "multicast addresses\n",
668 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
669 mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
670 }
671
672 /* Is there an easier way? Trying to minimize to
673 * calls to add/del multicast addrs. We keep the
674 * addrs from the last call in enic->mc_addr and
675 * look for changes to add/del.
676 */
677
678 i = 0;
679 netdev_for_each_mc_addr(ha, netdev) {
680 if (i == mc_count)
681 break;
682 memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
683 }
684
685 for (i = 0; i < enic->mc_count; i++) {
686 for (j = 0; j < mc_count; j++)
687 if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
688 break;
689 if (j == mc_count)
690 enic_dev_del_addr(enic, enic->mc_addr[i]);
691 }
692
693 for (i = 0; i < mc_count; i++) {
694 for (j = 0; j < enic->mc_count; j++)
695 if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
696 break;
697 if (j == enic->mc_count)
698 enic_dev_add_addr(enic, mc_addr[i]);
699 }
700
701 /* Save the list to compare against next time
702 */
703
704 for (i = 0; i < mc_count; i++)
705 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
706
707 enic->mc_count = mc_count;
708}
709
710static void enic_update_unicast_addr_list(struct enic *enic)
711{
712 struct net_device *netdev = enic->netdev;
713 struct netdev_hw_addr *ha;
714 unsigned int uc_count = netdev_uc_count(netdev);
715 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
716 unsigned int i, j;
717
718 if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
719 netdev_warn(netdev, "Registering only %d out of %d "
720 "unicast addresses\n",
721 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
722 uc_count = ENIC_UNICAST_PERFECT_FILTERS;
723 }
724
725 /* Is there an easier way? Trying to minimize to
726 * calls to add/del unicast addrs. We keep the
727 * addrs from the last call in enic->uc_addr and
728 * look for changes to add/del.
729 */
730
731 i = 0;
732 netdev_for_each_uc_addr(ha, netdev) {
733 if (i == uc_count)
734 break;
735 memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
736 }
737
738 for (i = 0; i < enic->uc_count; i++) {
739 for (j = 0; j < uc_count; j++)
740 if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
741 break;
742 if (j == uc_count)
743 enic_dev_del_addr(enic, enic->uc_addr[i]);
744 }
745
746 for (i = 0; i < uc_count; i++) {
747 for (j = 0; j < enic->uc_count; j++)
748 if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
749 break;
750 if (j == enic->uc_count)
751 enic_dev_add_addr(enic, uc_addr[i]);
752 }
753
754 /* Save the list to compare against next time
755 */
756
757 for (i = 0; i < uc_count; i++)
758 memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
759
760 enic->uc_count = uc_count;
761}
762
763/* netif_tx_lock held, BHs disabled */ 750/* netif_tx_lock held, BHs disabled */
764static void enic_set_rx_mode(struct net_device *netdev) 751static void enic_set_rx_mode(struct net_device *netdev)
765{ 752{
@@ -782,9 +769,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
 	}
 
 	if (!promisc) {
-		enic_update_unicast_addr_list(enic);
+		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
 		if (!allmulti)
-			enic_update_multicast_addr_list(enic);
+			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
 	}
 }
 
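The hand-rolled compare loops removed above are replaced by the core __dev_uc_sync()/__dev_mc_sync() helpers, which track what was programmed last time and invoke the sync/unsync callbacks only for addresses that were added or removed. The minimal shape of the pattern (the "example_" names are illustrative; enic's real callbacks appear in the hunk further up):

static int example_uc_sync(struct net_device *netdev, const u8 *addr)
{
	/* program one unicast address into the NIC's perfect filter */
	return 0;
}

static int example_uc_unsync(struct net_device *netdev, const u8 *addr)
{
	/* remove one unicast address from the filter */
	return 0;
}

static void example_set_rx_mode(struct net_device *netdev)
{
	/* the core diffs netdev->uc against its synced state and calls
	 * the callbacks only for the delta
	 */
	__dev_uc_sync(netdev, example_uc_sync, example_uc_unsync);
}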
@@ -979,6 +966,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
979 return 0; 966 return 0;
980} 967}
981 968
969static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
970 u32 pkt_len)
971{
972 if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
973 pkt_size->large_pkt_bytes_cnt += pkt_len;
974 else
975 pkt_size->small_pkt_bytes_cnt += pkt_len;
976}
977
982static void enic_rq_indicate_buf(struct vnic_rq *rq, 978static void enic_rq_indicate_buf(struct vnic_rq *rq,
983 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 979 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
984 int skipped, void *opaque) 980 int skipped, void *opaque)
@@ -986,6 +982,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
986 struct enic *enic = vnic_dev_priv(rq->vdev); 982 struct enic *enic = vnic_dev_priv(rq->vdev);
987 struct net_device *netdev = enic->netdev; 983 struct net_device *netdev = enic->netdev;
988 struct sk_buff *skb; 984 struct sk_buff *skb;
985 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
989 986
990 u8 type, color, eop, sop, ingress_port, vlan_stripped; 987 u8 type, color, eop, sop, ingress_port, vlan_stripped;
991 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; 988 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1053,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1056 napi_gro_receive(&enic->napi[q_number], skb); 1053 napi_gro_receive(&enic->napi[q_number], skb);
1057 else 1054 else
1058 netif_receive_skb(skb); 1055 netif_receive_skb(skb);
1056 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1057 enic_intr_update_pkt_size(&cq->pkt_size_counter,
1058 bytes_written);
1059 } else { 1059 } else {
1060 1060
1061 /* Buffer overflow 1061 /* Buffer overflow
@@ -1134,6 +1134,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
1134 return rq_work_done; 1134 return rq_work_done;
1135} 1135}
1136 1136
1137static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
1138{
1139 unsigned int intr = enic_msix_rq_intr(enic, rq->index);
1140 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1141 u32 timer = cq->tobe_rx_coal_timeval;
1142
1143 if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
1144 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
1145 cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
1146 }
1147}
1148
1149static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
1150{
1151 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1152 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1153 struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
1154 int index;
1155 u32 timer;
1156 u32 range_start;
1157 u32 traffic;
1158 u64 delta;
1159 ktime_t now = ktime_get();
1160
1161 delta = ktime_us_delta(now, cq->prev_ts);
1162 if (delta < ENIC_AIC_TS_BREAK)
1163 return;
1164 cq->prev_ts = now;
1165
1166 traffic = pkt_size_counter->large_pkt_bytes_cnt +
1167 pkt_size_counter->small_pkt_bytes_cnt;
1168 /* The table takes Mbps
1169 * traffic *= 8 => bits
1170 * traffic *= (10^6 / delta) => bps
1171 * traffic /= 10^6 => Mbps
1172 *
1173 * Combining, traffic *= (8 / delta)
1174 */
1175
1176 traffic <<= 3;
1177 traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
1178
1179 for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
1180 if (traffic < mod_table[index].rx_rate)
1181 break;
1182 range_start = (pkt_size_counter->small_pkt_bytes_cnt >
1183 pkt_size_counter->large_pkt_bytes_cnt << 1) ?
1184 rx_coal->small_pkt_range_start :
1185 rx_coal->large_pkt_range_start;
1186 timer = range_start + ((rx_coal->range_end - range_start) *
1187 mod_table[index].range_percent / 100);
1188 /* Damping */
1189 cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
1190
1191 pkt_size_counter->large_pkt_bytes_cnt = 0;
1192 pkt_size_counter->small_pkt_bytes_cnt = 0;
1193}
1194
1137static int enic_poll_msix(struct napi_struct *napi, int budget) 1195static int enic_poll_msix(struct napi_struct *napi, int budget)
1138{ 1196{
1139 struct net_device *netdev = napi->dev; 1197 struct net_device *netdev = napi->dev;
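In enic_calc_int_moderation() above, the units work out as the comment says: ktime_us_delta() yields microseconds, so bytes << 3 divided by delta is bits per microsecond, i.e. megabits per second, matching the mod_table entries. For example, 125,000 bytes seen over delta = 200 us gives (125000 << 3) / 200 = 5000, a 5 Gbps estimate.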
@@ -1171,6 +1229,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1171 1229
1172 if (err) 1230 if (err)
1173 work_done = work_to_do; 1231 work_done = work_to_do;
1232 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1233 /* Call the function which refreshes
1234 * the intr coalescing timer value based on
1235 * the traffic. This is supported only in
1236 * the case of MSI-x mode
1237 */
1238 enic_calc_int_moderation(enic, &enic->rq[rq]);
1174 1239
1175 if (work_done < work_to_do) { 1240 if (work_done < work_to_do) {
1176 1241
@@ -1179,6 +1244,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1179 */ 1244 */
1180 1245
1181 napi_complete(napi); 1246 napi_complete(napi);
1247 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1248 enic_set_int_moderation(enic, &enic->rq[rq]);
1182 vnic_intr_unmask(&enic->intr[intr]); 1249 vnic_intr_unmask(&enic->intr[intr]);
1183 } 1250 }
1184 1251
@@ -1314,6 +1381,42 @@ static void enic_synchronize_irqs(struct enic *enic)
1314 } 1381 }
1315} 1382}
1316 1383
1384static void enic_set_rx_coal_setting(struct enic *enic)
1385{
1386 unsigned int speed;
1387 int index = -1;
1388 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1389
1390 /* If intr mode is not MSIX, do not do adaptive coalescing */
1391 if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
1392 netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
1393 return;
1394 }
1395
1396 /* 1. Read the link speed from fw
1397 * 2. Pick the default range for the speed
1398 * 3. Update it in enic->rx_coalesce_setting
1399 */
1400 speed = vnic_dev_port_speed(enic->vdev);
1401 if (ENIC_LINK_SPEED_10G < speed)
1402 index = ENIC_LINK_40G_INDEX;
1403 else if (ENIC_LINK_SPEED_4G < speed)
1404 index = ENIC_LINK_10G_INDEX;
1405 else
1406 index = ENIC_LINK_4G_INDEX;
1407
1408 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
1409 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
1410 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
1411
1412 /* Start with the value provided by UCSM */
1413 for (index = 0; index < enic->rq_count; index++)
1414 enic->cq[index].cur_rx_coal_timeval =
1415 enic->config.intr_timer_usec;
1416
1417 rx_coal->use_adaptive_rx_coalesce = 1;
1418}
1419
1317static int enic_dev_notify_set(struct enic *enic) 1420static int enic_dev_notify_set(struct enic *enic)
1318{ 1421{
1319 int err; 1422 int err;
@@ -2231,6 +2334,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2231 enic->notify_timer.function = enic_notify_timer; 2334 enic->notify_timer.function = enic_notify_timer;
2232 enic->notify_timer.data = (unsigned long)enic; 2335 enic->notify_timer.data = (unsigned long)enic;
2233 2336
2337 enic_set_rx_coal_setting(enic);
2234 INIT_WORK(&enic->reset, enic_reset); 2338 INIT_WORK(&enic->reset, enic_reset);
2235 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); 2339 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
2236 2340
@@ -2250,6 +2354,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2250 } 2354 }
2251 2355
2252 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 2356 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2357 /* rx coalesce time already got initialized. This gets used
2358 * if adaptive coal is turned off
2359 */
2253 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; 2360 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2254 2361
2255 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2362 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315cbe803..4e6aa65857f7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
50 u32 pad10; 50 u32 pad10;
51}; 51};
52 52
53struct vnic_rx_bytes_counter {
54 unsigned int small_pkt_bytes_cnt;
55 unsigned int large_pkt_bytes_cnt;
56};
57
53struct vnic_cq { 58struct vnic_cq {
54 unsigned int index; 59 unsigned int index;
55 struct vnic_dev *vdev; 60 struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
58 unsigned int to_clean; 63 unsigned int to_clean;
59 unsigned int last_color; 64 unsigned int last_color;
60 unsigned int interrupt_offset; 65 unsigned int interrupt_offset;
66 struct vnic_rx_bytes_counter pkt_size_counter;
67 unsigned int cur_rx_coal_timeval;
68 unsigned int tobe_rx_coal_timeval;
69 ktime_t prev_ts;
61}; 70};
62 71
63static inline unsigned int vnic_cq_service(struct vnic_cq *cq, 72static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 69dd92598b7e..e86a45cb9e68 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -657,7 +657,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
657 return err; 657 return err;
658} 658}
659 659
660int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) 660int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
661{ 661{
662 u64 a0 = 0, a1 = 0; 662 u64 a0 = 0, a1 = 0;
663 int wait = 1000; 663 int wait = 1000;
@@ -674,7 +674,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
674 return err; 674 return err;
675} 675}
676 676
677int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) 677int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
678{ 678{
679 u64 a0 = 0, a1 = 0; 679 u64 a0 = 0, a1 = 0;
680 int wait = 1000; 680 int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index e670029862a1..1f3b301f8225 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -95,8 +95,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
95int vnic_dev_hang_notify(struct vnic_dev *vdev); 95int vnic_dev_hang_notify(struct vnic_dev *vdev);
96int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, 96int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
97 int broadcast, int promisc, int allmulti); 97 int broadcast, int promisc, int allmulti);
98int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); 98int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr);
99int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); 99int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr);
100int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); 100int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
101int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); 101int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
102int vnic_dev_notify_unset(struct vnic_dev *vdev); 102int vnic_dev_notify_unset(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8c4b93be333b..13723c96d1a2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -109,6 +109,7 @@ typedef struct board_info {
109 u8 imr_all; 109 u8 imr_all;
110 110
111 unsigned int flags; 111 unsigned int flags;
112 unsigned int in_timeout:1;
112 unsigned int in_suspend:1; 113 unsigned int in_suspend:1;
113 unsigned int wake_supported:1; 114 unsigned int wake_supported:1;
114 115
@@ -187,13 +188,13 @@ dm9000_reset(board_info_t *db)
 	 * The essential point is that we have to do a double reset, and the
 	 * instruction is to set LBK into MAC internal loopback mode.
 	 */
-	iow(db, DM9000_NCR, 0x03);
+	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
 	udelay(100); /* Application note says at least 20 us */
 	if (ior(db, DM9000_NCR) & 1)
 		dev_err(db->dev, "dm9000 did not respond to first reset\n");
 
 	iow(db, DM9000_NCR, 0);
-	iow(db, DM9000_NCR, 0x03);
+	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
 	udelay(100);
 	if (ior(db, DM9000_NCR) & 1)
 		dev_err(db->dev, "dm9000 did not respond to second reset\n");
@@ -273,7 +274,7 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
  */
 static void dm9000_msleep(board_info_t *db, unsigned int ms)
 {
-	if (db->in_suspend)
+	if (db->in_suspend || db->in_timeout)
 		mdelay(ms);
 	else
 		msleep(ms);
@@ -334,7 +335,8 @@ dm9000_phy_write(struct net_device *dev,
 	unsigned long reg_save;
 
 	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
-	mutex_lock(&db->addr_lock);
+	if (!db->in_timeout)
+		mutex_lock(&db->addr_lock);
 
 	spin_lock_irqsave(&db->lock, flags);
 
@@ -365,7 +367,8 @@ dm9000_phy_write(struct net_device *dev,
 	writeb(reg_save, db->io_addr);
 
 	spin_unlock_irqrestore(&db->lock, flags);
-	mutex_unlock(&db->addr_lock);
+	if (!db->in_timeout)
+		mutex_unlock(&db->addr_lock);
 }
 
 /* dm9000_set_io
@@ -882,6 +885,18 @@ dm9000_hash_table(struct net_device *dev)
882 spin_unlock_irqrestore(&db->lock, flags); 885 spin_unlock_irqrestore(&db->lock, flags);
883} 886}
884 887
888static void
889dm9000_mask_interrupts(board_info_t *db)
890{
891 iow(db, DM9000_IMR, IMR_PAR);
892}
893
894static void
895dm9000_unmask_interrupts(board_info_t *db)
896{
897 iow(db, DM9000_IMR, db->imr_all);
898}
899
885/* 900/*
886 * Initialize dm9000 board 901 * Initialize dm9000 board
887 */ 902 */
@@ -894,6 +909,9 @@ dm9000_init_dm9000(struct net_device *dev)
894 909
895 dm9000_dbg(db, 1, "entering %s\n", __func__); 910 dm9000_dbg(db, 1, "entering %s\n", __func__);
896 911
912 dm9000_reset(db);
913 dm9000_mask_interrupts(db);
914
897 /* I/O mode */ 915 /* I/O mode */
898 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ 916 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
899 917
@@ -941,9 +959,6 @@ dm9000_init_dm9000(struct net_device *dev)
941 959
942 db->imr_all = imr; 960 db->imr_all = imr;
943 961
944 /* Enable TX/RX interrupt mask */
945 iow(db, DM9000_IMR, imr);
946
947 /* Init Driver variable */ 962 /* Init Driver variable */
948 db->tx_pkt_cnt = 0; 963 db->tx_pkt_cnt = 0;
949 db->queue_pkt_len = 0; 964 db->queue_pkt_len = 0;
@@ -959,17 +974,19 @@ static void dm9000_timeout(struct net_device *dev)
 
 	/* Save previous register address */
 	spin_lock_irqsave(&db->lock, flags);
+	db->in_timeout = 1;
 	reg_save = readb(db->io_addr);
 
 	netif_stop_queue(dev);
-	dm9000_reset(db);
 	dm9000_init_dm9000(dev);
+	dm9000_unmask_interrupts(db);
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies; /* prevent tx timeout */
 	netif_wake_queue(dev);
 
 	/* Restore previous register address */
 	writeb(reg_save, db->io_addr);
+	db->in_timeout = 0;
 	spin_unlock_irqrestore(&db->lock, flags);
 }
 
975 992
@@ -1093,7 +1110,6 @@ dm9000_rx(struct net_device *dev)
1093 if (rxbyte & DM9000_PKT_ERR) { 1110 if (rxbyte & DM9000_PKT_ERR) {
1094 dev_warn(db->dev, "status check fail: %d\n", rxbyte); 1111 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1095 iow(db, DM9000_RCR, 0x00); /* Stop Device */ 1112 iow(db, DM9000_RCR, 0x00); /* Stop Device */
1096 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
1097 return; 1113 return;
1098 } 1114 }
1099 1115
@@ -1193,9 +1209,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
 	/* Save previous register address */
 	reg_save = readb(db->io_addr);
 
-	/* Disable all interrupts */
-	iow(db, DM9000_IMR, IMR_PAR);
-
+	dm9000_mask_interrupts(db);
 	/* Got DM9000 interrupt status */
 	int_status = ior(db, DM9000_ISR);	/* Got ISR */
 	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */
@@ -1218,9 +1232,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
 		}
 	}
 
-	/* Re-enable interrupt mask */
-	iow(db, DM9000_IMR, db->imr_all);
-
+	dm9000_unmask_interrupts(db);
 	/* Restore previous register address */
 	writeb(reg_save, db->io_addr);
 
@@ -1292,6 +1304,9 @@ dm9000_open(struct net_device *dev)
1292 * may work, and tell the user that this is a problem */ 1304 * may work, and tell the user that this is a problem */
1293 1305
1294 if (irqflags == IRQF_TRIGGER_NONE) 1306 if (irqflags == IRQF_TRIGGER_NONE)
1307 irqflags = irq_get_trigger_type(dev->irq);
1308
1309 if (irqflags == IRQF_TRIGGER_NONE)
1295 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1296 1311
1297 irqflags |= IRQF_SHARED; 1312 irqflags |= IRQF_SHARED;
@@ -1301,11 +1316,14 @@ dm9000_open(struct net_device *dev)
 	mdelay(1); /* delay needs by DM9000B */
 
 	/* Initialize DM9000 board */
-	dm9000_reset(db);
 	dm9000_init_dm9000(dev);
 
 	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
 		return -EAGAIN;
+	/* Now that we have an interrupt handler hooked up we can unmask
+	 * our interrupts
+	 */
+	dm9000_unmask_interrupts(db);
 
 	/* Init driver variable */
 	db->dbug_cnt = 0;
@@ -1313,7 +1331,8 @@ dm9000_open(struct net_device *dev)
 	mii_check_media(&db->mii, netif_msg_link(db), 1);
 	netif_start_queue(dev);
 
-	dm9000_schedule_poll(db);
+	/* Poll initial link status */
+	schedule_delayed_work(&db->phy_poll, 1);
 
 	return 0;
 }
@@ -1326,7 +1345,7 @@ dm9000_shutdown(struct net_device *dev)
 	/* RESET device */
 	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
 	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
-	iow(db, DM9000_IMR, IMR_PAR);	/* Disable all interrupt */
+	dm9000_mask_interrupts(db);
 	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
 }
 
@@ -1547,12 +1566,7 @@ dm9000_probe(struct platform_device *pdev)
 	db->flags |= DM9000_PLATF_SIMPLE_PHY;
 #endif
 
-	/* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
-	 * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
-	 * while probe stage.
-	 */
-
-	iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+	dm9000_reset(db);
 
 	/* try multiple times, DM9000 sometimes gets the read wrong */
 	for (i = 0; i < 8; i++) {
@@ -1695,8 +1709,8 @@ dm9000_drv_resume(struct device *dev)
 	/* reset if we were not in wake mode to ensure if
 	 * the device was powered off it is in a known state */
 	if (!db->wake_state) {
-		dm9000_reset(db);
 		dm9000_init_dm9000(ndev);
+		dm9000_unmask_interrupts(db);
 	}
 
 	netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1642de78aac8..861660841ce2 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef CONFIG_TULIP_NAPI
 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 #endif
-	SET_ETHTOOL_OPS(dev, &ops);
+	dev->ethtool_ops = &ops;
 
 	if (register_netdev(dev))
 		goto err_out_free_ring;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa801a6af7b9..80afec335a11 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -962,8 +962,8 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
 	}
 	if(db->link_failed)
 	{
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	if (db->media_mode & ULI526X_AUTO)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 4fb756d219f7..1274b6fdac8a 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 #if 0
 	dev->features = NETIF_F_IP_CSUM;
 #endif
@@ -1185,8 +1185,8 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		ethtool_cmd_speed_set(cmd, np->speed);
 		cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(cmd, -1);
-		cmd->duplex = -1;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
 	}
 	if ( np->an_enable)
 		cmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d9e5ca0d48c1..433c1e185442 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
 
 	/* The chip-specific entries in the device structure. */
 	dev->netdev_ops = &netdev_ops;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 4884205e56ee..056b44b93477 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -134,17 +134,17 @@ struct ec_bhf_priv {
 
 	struct pci_dev *dev;
 
-	void * __iomem io;
-	void * __iomem dma_io;
+	void __iomem *io;
+	void __iomem *dma_io;
 
 	struct hrtimer hrtimer;
 
 	int tx_dma_chan;
 	int rx_dma_chan;
-	void * __iomem ec_io;
-	void * __iomem fifo_io;
-	void * __iomem mii_io;
-	void * __iomem mac_io;
+	void __iomem *ec_io;
+	void __iomem *fifo_io;
+	void __iomem *mii_io;
+	void __iomem *mac_io;
 
 	struct bhf_dma rx_buf;
 	struct rx_desc *rx_descs;
@@ -297,7 +297,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
 {
 	struct device *dev = PRIV_TO_DEV(priv);
 	unsigned block_count, i;
-	void * __iomem ec_info;
+	void __iomem *ec_info;
 
 	dev_dbg(dev, "Info block:\n");
 	dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
@@ -569,8 +569,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct net_device *net_dev;
 	struct ec_bhf_priv *priv;
-	void * __iomem dma_io;
-	void * __iomem io;
+	void __iomem *dma_io;
+	void __iomem *io;
 	int err = 0;
 
 	err = pci_enable_device(dev);
@@ -615,7 +615,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	}
 
 	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
-	if (net_dev == 0) {
+	if (net_dev == NULL) {
 		err = -ENOMEM;
 		goto err_unmap_dma_io;
 	}
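The ec_bhf changes are pure sparse fixes: the __iomem address-space annotation belongs with the pointee, and MMIO pointers are then only touched through the io accessors. A minimal illustration of the corrected style (the struct and helper here are invented for the example):

#include <linux/io.h>

struct example_priv {
	void __iomem *regs;	/* correct; "void * __iomem regs" is not */
};

static u16 example_read_id(struct example_priv *p)
{
	/* never dereference an __iomem pointer directly */
	return ioread16(p->regs);
}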
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7179df..2e7c5553955e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
120#define MAX_VFS 30 /* Max VFs supported by BE3 FW */ 120#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
121#define FW_VER_LEN 32 121#define FW_VER_LEN 32
122 122
123#define RSS_INDIR_TABLE_LEN 128
124#define RSS_HASH_KEY_LEN 40
125
123struct be_dma_mem { 126struct be_dma_mem {
124 void *va; 127 void *va;
125 dma_addr_t dma; 128 dma_addr_t dma;
@@ -371,6 +374,7 @@ enum vf_state {
371#define BE_FLAGS_LINK_STATUS_INIT 1 374#define BE_FLAGS_LINK_STATUS_INIT 1
372#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 375#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
373#define BE_FLAGS_VLAN_PROMISC (1 << 4) 376#define BE_FLAGS_VLAN_PROMISC (1 << 4)
377#define BE_FLAGS_MCAST_PROMISC (1 << 5)
374#define BE_FLAGS_NAPI_ENABLED (1 << 9) 378#define BE_FLAGS_NAPI_ENABLED (1 << 9)
375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) 379#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
376#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12) 380#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
@@ -409,6 +413,13 @@ struct be_resources {
409 u32 if_cap_flags; 413 u32 if_cap_flags;
410}; 414};
411 415
416struct rss_info {
417 u64 rss_flags;
418 u8 rsstable[RSS_INDIR_TABLE_LEN];
419 u8 rss_queue[RSS_INDIR_TABLE_LEN];
420 u8 rss_hkey[RSS_HASH_KEY_LEN];
421};
422
412struct be_adapter { 423struct be_adapter {
413 struct pci_dev *pdev; 424 struct pci_dev *pdev;
414 struct net_device *netdev; 425 struct net_device *netdev;
@@ -445,7 +456,7 @@ struct be_adapter {
 	struct be_drv_stats drv_stats;
 	struct be_aic_obj aic_obj[MAX_EVT_QS];
 	u16 vlans_added;
-	u8 vlan_tag[VLAN_N_VID];
+	unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
 	u8 vlan_prio_bmap;	/* Available Priority BitMap */
 	u16 recommended_prio;	/* Recommended Priority */
 	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
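Moving from a byte-per-VID array to a bitmap shrinks the tracking state from 4096 bytes to 512 and lets the driver use the standard bitops. An illustrative snippet (be_vid_cfg() is a hypothetical helper, not from the driver):

static void example_restore_vlans(struct be_adapter *adapter)
{
	u16 vid;

	/* walk only the VIDs whose bit is set */
	for_each_set_bit(vid, adapter->vids, VLAN_N_VID)
		be_vid_cfg(adapter, vid);
}

with set_bit(vid, adapter->vids) and clear_bit(vid, adapter->vids) maintaining the map as VLANs are added and removed.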
@@ -507,7 +518,7 @@ struct be_adapter {
507 u32 msg_enable; 518 u32 msg_enable;
508 int be_get_temp_freq; 519 int be_get_temp_freq;
509 u8 pf_number; 520 u8 pf_number;
510 u64 rss_flags; 521 struct rss_info rss_info;
511}; 522};
512 523
513#define be_physfn(adapter) (!adapter->virtfn) 524#define be_physfn(adapter) (!adapter->virtfn)
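
Two data-structure changes stand out in the be.h hunks above: RSS state is gathered into the new struct rss_info, and the per-VLAN u8 vlan_tag[VLAN_N_VID] array (4096 bytes) shrinks to a vids bitmap (one bit per VID). A hedged sketch of the bitmap pattern this enables; the helper names below are illustrative, not from the patch:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

/* Record a configured VLAN: one bit per VID instead of one byte. */
static void vid_set(unsigned long *vids, u16 vid)
{
	set_bit(vid, vids);
}

/* Flatten the bitmap into the vtag_array that be_cmd_vlan_config() takes. */
static u16 vids_to_array(const unsigned long *vids, u16 *vtag_array)
{
	unsigned int vid;
	u16 num = 0;

	for_each_set_bit(vid, vids, VLAN_N_VID)
		vtag_array[num++] = vid;
	return num;
}
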
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15af0d24..f4ea3490f446 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
52 } 52 }
53}; 53};
54 54
55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, 55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
56 u8 subsystem)
57{ 56{
58 int i; 57 int i;
59 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); 58 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -120,21 +119,28 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
120 return (void *)addr; 119 return (void *)addr;
121} 120}
122 121
123static int be_mcc_compl_process(struct be_adapter *adapter, 122static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
124 struct be_mcc_compl *compl)
125{ 123{
126 u16 compl_status, extd_status; 124 if (base_status == MCC_STATUS_NOT_SUPPORTED ||
127 struct be_cmd_resp_hdr *resp_hdr; 125 base_status == MCC_STATUS_ILLEGAL_REQUEST ||
128 u8 opcode = 0, subsystem = 0; 126 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
129 127 (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
130 /* Just swap the status to host endian; mcc tag is opaquely copied 128 (base_status == MCC_STATUS_ILLEGAL_FIELD ||
131 * from mcc_wrb */ 129 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
132 be_dws_le_to_cpu(compl, 4); 130 return true;
133 131 else
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 132 return false;
135 CQE_STATUS_COMPL_MASK; 133}
136 134
137	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);	135/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
136 * loop (has not issued be_mcc_notify_wait())
137 */
138static void be_async_cmd_process(struct be_adapter *adapter,
139 struct be_mcc_compl *compl,
140 struct be_cmd_resp_hdr *resp_hdr)
141{
142 enum mcc_base_status base_status = base_status(compl->status);
143 u8 opcode = 0, subsystem = 0;
138 144
139 if (resp_hdr) { 145 if (resp_hdr) {
140 opcode = resp_hdr->opcode; 146 opcode = resp_hdr->opcode;
@@ -144,61 +150,86 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
144 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && 150 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 subsystem == CMD_SUBSYSTEM_LOWLEVEL) { 151 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 complete(&adapter->et_cmd_compl); 152 complete(&adapter->et_cmd_compl);
147 return 0; 153 return;
148 } 154 }
149 155
150 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 156 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
151 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 157 opcode == OPCODE_COMMON_WRITE_OBJECT) &&
152 (subsystem == CMD_SUBSYSTEM_COMMON)) { 158 subsystem == CMD_SUBSYSTEM_COMMON) {
153 adapter->flash_status = compl_status; 159 adapter->flash_status = compl->status;
154 complete(&adapter->et_cmd_compl); 160 complete(&adapter->et_cmd_compl);
161 return;
155 } 162 }
156 163
157 if (compl_status == MCC_STATUS_SUCCESS) { 164 if ((opcode == OPCODE_ETH_GET_STATISTICS ||
158 if (((opcode == OPCODE_ETH_GET_STATISTICS) || 165 opcode == OPCODE_ETH_GET_PPORT_STATS) &&
159 (opcode == OPCODE_ETH_GET_PPORT_STATS)) && 166 subsystem == CMD_SUBSYSTEM_ETH &&
160 (subsystem == CMD_SUBSYSTEM_ETH)) { 167 base_status == MCC_STATUS_SUCCESS) {
161 be_parse_stats(adapter); 168 be_parse_stats(adapter);
162 adapter->stats_cmd_sent = false; 169 adapter->stats_cmd_sent = false;
163 } 170 return;
164 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && 171 }
165 subsystem == CMD_SUBSYSTEM_COMMON) { 172
173 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
174 subsystem == CMD_SUBSYSTEM_COMMON) {
175 if (base_status == MCC_STATUS_SUCCESS) {
166 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 176 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
167 (void *)resp_hdr; 177 (void *)resp_hdr;
168 adapter->drv_stats.be_on_die_temperature = 178 adapter->drv_stats.be_on_die_temperature =
169 resp->on_die_temperature; 179 resp->on_die_temperature;
170 } 180 } else {
171 } else {
172 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
173 adapter->be_get_temp_freq = 0; 181 adapter->be_get_temp_freq = 0;
182 }
183 return;
184 }
185}
186
187static int be_mcc_compl_process(struct be_adapter *adapter,
188 struct be_mcc_compl *compl)
189{
190 enum mcc_base_status base_status;
191 enum mcc_addl_status addl_status;
192 struct be_cmd_resp_hdr *resp_hdr;
193 u8 opcode = 0, subsystem = 0;
194
195 /* Just swap the status to host endian; mcc tag is opaquely copied
196 * from mcc_wrb */
197 be_dws_le_to_cpu(compl, 4);
198
199 base_status = base_status(compl->status);
200 addl_status = addl_status(compl->status);
201
202 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
203 if (resp_hdr) {
204 opcode = resp_hdr->opcode;
205 subsystem = resp_hdr->subsystem;
206 }
207
208 be_async_cmd_process(adapter, compl, resp_hdr);
174 209
175 if (compl_status == MCC_STATUS_NOT_SUPPORTED || 210 if (base_status != MCC_STATUS_SUCCESS &&
176 compl_status == MCC_STATUS_ILLEGAL_REQUEST) 211 !be_skip_err_log(opcode, base_status, addl_status)) {
177 goto done;
178 212
179 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 213 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
180 dev_warn(&adapter->pdev->dev, 214 dev_warn(&adapter->pdev->dev,
181 "VF is not privileged to issue opcode %d-%d\n", 215 "VF is not privileged to issue opcode %d-%d\n",
182 opcode, subsystem); 216 opcode, subsystem);
183 } else { 217 } else {
184 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
185 CQE_STATUS_EXTD_MASK;
186 dev_err(&adapter->pdev->dev, 218 dev_err(&adapter->pdev->dev,
187 "opcode %d-%d failed:status %d-%d\n", 219 "opcode %d-%d failed:status %d-%d\n",
188 opcode, subsystem, compl_status, extd_status); 220 opcode, subsystem, base_status, addl_status);
189
190 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
191 return extd_status;
192 } 221 }
193 } 222 }
194done: 223 return compl->status;
195 return compl_status;
196} 224}
197 225
198/* Link state evt is a string of bytes; no need for endian swapping */ 226/* Link state evt is a string of bytes; no need for endian swapping */
199static void be_async_link_state_process(struct be_adapter *adapter, 227static void be_async_link_state_process(struct be_adapter *adapter,
200 struct be_async_event_link_state *evt) 228 struct be_mcc_compl *compl)
201{ 229{
230 struct be_async_event_link_state *evt =
231 (struct be_async_event_link_state *)compl;
232
202 /* When link status changes, link speed must be re-queried from FW */ 233 /* When link status changes, link speed must be re-queried from FW */
203 adapter->phy.link_speed = -1; 234 adapter->phy.link_speed = -1;
204 235
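
The refactor above replaces the open-coded CQE_STATUS_COMPL/CQE_STATUS_EXTD shifting with base_status()/addl_status() accessors over a single 32-bit status word; the matching encode side appears below in be_mcc_notify_wait(). A sketch of the packing under assumed field positions (the names mirror the diff, but the exact shift/mask values are not shown in it, so treat them as illustrative):

#include <linux/types.h>

#define CQE_ADDL_STATUS_SHIFT	16	/* assumed position */
#define CQE_ADDL_STATUS_MASK	0xFF	/* assumed width */

/* Decode: the low half carries the base status... */
static inline u16 sketch_base_status(u32 status)
{
	return status & 0xFFFF;
}

/* ...and the additional status sits above it. */
static inline u8 sketch_addl_status(u32 status)
{
	return (status >> CQE_ADDL_STATUS_SHIFT) & CQE_ADDL_STATUS_MASK;
}

/* Encode, matching the expression in be_mcc_notify_wait() in this diff. */
static inline u32 sketch_pack_status(u16 base, u8 addl)
{
	return base | ((u32)(addl & CQE_ADDL_STATUS_MASK) <<
		       CQE_ADDL_STATUS_SHIFT);
}
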
@@ -221,8 +252,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
221 252
222/* Grp5 CoS Priority evt */ 253/* Grp5 CoS Priority evt */
223static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 254static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
224 struct be_async_event_grp5_cos_priority *evt) 255 struct be_mcc_compl *compl)
225{ 256{
257 struct be_async_event_grp5_cos_priority *evt =
258 (struct be_async_event_grp5_cos_priority *)compl;
259
226 if (evt->valid) { 260 if (evt->valid) {
227 adapter->vlan_prio_bmap = evt->available_priority_bmap; 261 adapter->vlan_prio_bmap = evt->available_priority_bmap;
228 adapter->recommended_prio &= ~VLAN_PRIO_MASK; 262 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
@@ -233,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
233 267
234/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */ 268/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
235static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 269static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
236 struct be_async_event_grp5_qos_link_speed *evt) 270 struct be_mcc_compl *compl)
237{ 271{
272 struct be_async_event_grp5_qos_link_speed *evt =
273 (struct be_async_event_grp5_qos_link_speed *)compl;
274
238 if (adapter->phy.link_speed >= 0 && 275 if (adapter->phy.link_speed >= 0 &&
239 evt->physical_port == adapter->port_num) 276 evt->physical_port == adapter->port_num)
240 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10; 277 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
@@ -242,8 +279,11 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
242 279
243/* Grp5 PVID evt */	280/* Grp5 PVID evt */
244static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 281static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
245 struct be_async_event_grp5_pvid_state *evt) 282 struct be_mcc_compl *compl)
246{ 283{
284 struct be_async_event_grp5_pvid_state *evt =
285 (struct be_async_event_grp5_pvid_state *)compl;
286
247 if (evt->enabled) { 287 if (evt->enabled) {
248 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 288 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
249 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); 289 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
@@ -253,26 +293,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
253} 293}
254 294
255static void be_async_grp5_evt_process(struct be_adapter *adapter, 295static void be_async_grp5_evt_process(struct be_adapter *adapter,
256 u32 trailer, struct be_mcc_compl *evt) 296 struct be_mcc_compl *compl)
257{ 297{
258 u8 event_type = 0; 298 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
259 299 ASYNC_EVENT_TYPE_MASK;
260 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
261 ASYNC_TRAILER_EVENT_TYPE_MASK;
262 300
263 switch (event_type) { 301 switch (event_type) {
264 case ASYNC_EVENT_COS_PRIORITY: 302 case ASYNC_EVENT_COS_PRIORITY:
265 be_async_grp5_cos_priority_process(adapter, 303 be_async_grp5_cos_priority_process(adapter, compl);
266 (struct be_async_event_grp5_cos_priority *)evt); 304 break;
267 break;
268 case ASYNC_EVENT_QOS_SPEED: 305 case ASYNC_EVENT_QOS_SPEED:
269 be_async_grp5_qos_speed_process(adapter, 306 be_async_grp5_qos_speed_process(adapter, compl);
270 (struct be_async_event_grp5_qos_link_speed *)evt); 307 break;
271 break;
272 case ASYNC_EVENT_PVID_STATE: 308 case ASYNC_EVENT_PVID_STATE:
273 be_async_grp5_pvid_state_process(adapter, 309 be_async_grp5_pvid_state_process(adapter, compl);
274 (struct be_async_event_grp5_pvid_state *)evt); 310 break;
275 break;
276 default: 311 default:
277 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n", 312 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
278 event_type); 313 event_type);
@@ -281,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
281} 316}
282 317
283static void be_async_dbg_evt_process(struct be_adapter *adapter, 318static void be_async_dbg_evt_process(struct be_adapter *adapter,
284 u32 trailer, struct be_mcc_compl *cmp) 319 struct be_mcc_compl *cmp)
285{ 320{
286 u8 event_type = 0; 321 u8 event_type = 0;
287 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp; 322 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
288 323
289 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & 324 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
290 ASYNC_TRAILER_EVENT_TYPE_MASK; 325 ASYNC_EVENT_TYPE_MASK;
291 326
292 switch (event_type) { 327 switch (event_type) {
293 case ASYNC_DEBUG_EVENT_TYPE_QNQ: 328 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
@@ -302,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
302 } 337 }
303} 338}
304 339
305static inline bool is_link_state_evt(u32 trailer) 340static inline bool is_link_state_evt(u32 flags)
306{ 341{
307 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 342 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
308 ASYNC_TRAILER_EVENT_CODE_MASK) == 343 ASYNC_EVENT_CODE_LINK_STATE;
309 ASYNC_EVENT_CODE_LINK_STATE;
310} 344}
311 345
312static inline bool is_grp5_evt(u32 trailer) 346static inline bool is_grp5_evt(u32 flags)
313{ 347{
314 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 348 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
315 ASYNC_TRAILER_EVENT_CODE_MASK) == 349 ASYNC_EVENT_CODE_GRP_5;
316 ASYNC_EVENT_CODE_GRP_5);
317} 350}
318 351
319static inline bool is_dbg_evt(u32 trailer) 352static inline bool is_dbg_evt(u32 flags)
320{ 353{
321 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 354 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
322 ASYNC_TRAILER_EVENT_CODE_MASK) == 355 ASYNC_EVENT_CODE_QNQ;
323 ASYNC_EVENT_CODE_QNQ); 356}
357
358static void be_mcc_event_process(struct be_adapter *adapter,
359 struct be_mcc_compl *compl)
360{
361 if (is_link_state_evt(compl->flags))
362 be_async_link_state_process(adapter, compl);
363 else if (is_grp5_evt(compl->flags))
364 be_async_grp5_evt_process(adapter, compl);
365 else if (is_dbg_evt(compl->flags))
366 be_async_dbg_evt_process(adapter, compl);
324} 367}
325 368
326static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 369static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
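
The predicates above all decode the same completion flags word: an event code selects the family (link state, grp5, qnq/debug) and, within a family, an event type selects the handler in be_async_grp5_evt_process() or be_async_dbg_evt_process(). A compact model of that decode; the shift/mask values are assumptions, only the macro names appear in this diff:

#include <linux/types.h>

#define ASYNC_EVENT_CODE_SHIFT	8	/* assumed */
#define ASYNC_EVENT_CODE_MASK	0xFF
#define ASYNC_EVENT_TYPE_SHIFT	16	/* assumed */
#define ASYNC_EVENT_TYPE_MASK	0xFF

static inline u8 async_event_code(u32 flags)
{
	return (flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK;
}

static inline u8 async_event_type(u32 flags)
{
	return (flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK;
}
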
@@ -362,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)
362 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 405 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
363 406
364 spin_lock(&adapter->mcc_cq_lock); 407 spin_lock(&adapter->mcc_cq_lock);
408
365 while ((compl = be_mcc_compl_get(adapter))) { 409 while ((compl = be_mcc_compl_get(adapter))) {
366 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 410 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
367 /* Interpret flags as an async trailer */ 411 be_mcc_event_process(adapter, compl);
368 if (is_link_state_evt(compl->flags))
369 be_async_link_state_process(adapter,
370 (struct be_async_event_link_state *) compl);
371 else if (is_grp5_evt(compl->flags))
372 be_async_grp5_evt_process(adapter,
373 compl->flags, compl);
374 else if (is_dbg_evt(compl->flags))
375 be_async_dbg_evt_process(adapter,
376 compl->flags, compl);
377 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 412 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
378 status = be_mcc_compl_process(adapter, compl); 413 status = be_mcc_compl_process(adapter, compl);
379 atomic_dec(&mcc_obj->q.used); 414 atomic_dec(&mcc_obj->q.used);
380 } 415 }
381 be_mcc_compl_use(compl); 416 be_mcc_compl_use(compl);
382 num++; 417 num++;
@@ -436,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
436 if (status == -EIO) 471 if (status == -EIO)
437 goto out; 472 goto out;
438 473
439 status = resp->status; 474 status = (resp->base_status |
475 ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
476 CQE_ADDL_STATUS_SHIFT));
440out: 477out:
441 return status; 478 return status;
442} 479}
@@ -560,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
560 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 597 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
561 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 598 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
562 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 599 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
563 sliport_err1 = ioread32(adapter->db + 600 sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
564 SLIPORT_ERROR1_OFFSET); 601 sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
565 sliport_err2 = ioread32(adapter->db +
566 SLIPORT_ERROR2_OFFSET);
567 602
568 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && 603 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
569 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) 604 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
630 if (stage == POST_STAGE_ARMFW_RDY) 665 if (stage == POST_STAGE_ARMFW_RDY)
631 return 0; 666 return 0;
632 667
633 dev_info(dev, "Waiting for POST, %ds elapsed\n", 668 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
634 timeout);
635 if (msleep_interruptible(2000)) { 669 if (msleep_interruptible(2000)) {
636 dev_err(dev, "Waiting for POST aborted\n"); 670 dev_err(dev, "Waiting for POST aborted\n");
637 return -EINTR; 671 return -EINTR;
@@ -649,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
649 return &wrb->payload.sgl[0]; 683 return &wrb->payload.sgl[0];
650} 684}
651 685
652static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, 686static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
653 unsigned long addr)
654{ 687{
655 wrb->tag0 = addr & 0xFFFFFFFF; 688 wrb->tag0 = addr & 0xFFFFFFFF;
656 wrb->tag1 = upper_32_bits(addr); 689 wrb->tag1 = upper_32_bits(addr);
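
fill_wrb_tags() above stashes a CPU address in the request so the completion path can find its response: the two 32-bit WRB tags together hold one 64-bit value. The matching decode is what be_decode_resp_hdr() earlier in this file performs; a one-line sketch of that inverse:

/* Sketch of the decode paired with fill_wrb_tags() above. */
static inline void *tags_to_addr(u32 tag0, u32 tag1)
{
	u64 addr = ((u64)tag1 << 32) | tag0;

	return (void *)(unsigned long)addr;
}
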
@@ -659,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
659/* Don't touch the hdr after it's prepared */ 692/* Don't touch the hdr after it's prepared */
660/* mem will be NULL for embedded commands */ 693/* mem will be NULL for embedded commands */
661static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 694static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
662 u8 subsystem, u8 opcode, int cmd_len, 695 u8 subsystem, u8 opcode, int cmd_len,
663 struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 696 struct be_mcc_wrb *wrb,
697 struct be_dma_mem *mem)
664{ 698{
665 struct be_sge *sge; 699 struct be_sge *sge;
666 700
@@ -683,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
683} 717}
684 718
685static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 719static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
686 struct be_dma_mem *mem) 720 struct be_dma_mem *mem)
687{ 721{
688 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 722 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
689 u64 dma = (u64)mem->dma; 723 u64 dma = (u64)mem->dma;
@@ -868,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
868 req = embedded_payload(wrb); 902 req = embedded_payload(wrb);
869 903
870 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 904 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
871 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); 905 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
906 NULL);
872 907
873 /* Support for EQ_CREATEv2 available only SH-R onwards */ 908 /* Support for EQ_CREATEv2 available only SH-R onwards */
874 if (!(BEx_chip(adapter) || lancer_chip(adapter))) 909 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
917 req = embedded_payload(wrb); 952 req = embedded_payload(wrb);
918 953
919 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 954 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
920 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); 955 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
956 NULL);
921 req->type = MAC_ADDRESS_TYPE_NETWORK; 957 req->type = MAC_ADDRESS_TYPE_NETWORK;
922 if (permanent) { 958 if (permanent) {
923 req->permanent = 1; 959 req->permanent = 1;
@@ -940,7 +976,7 @@ err:
940 976
941/* Uses synchronous MCCQ */ 977/* Uses synchronous MCCQ */
942int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 978int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
943 u32 if_id, u32 *pmac_id, u32 domain) 979 u32 if_id, u32 *pmac_id, u32 domain)
944{ 980{
945 struct be_mcc_wrb *wrb; 981 struct be_mcc_wrb *wrb;
946 struct be_cmd_req_pmac_add *req; 982 struct be_cmd_req_pmac_add *req;
@@ -956,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
956 req = embedded_payload(wrb); 992 req = embedded_payload(wrb);
957 993
958 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 994 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
959 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); 995 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
996 NULL);
960 997
961 req->hdr.domain = domain; 998 req->hdr.domain = domain;
962 req->if_id = cpu_to_le32(if_id); 999 req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1049,7 @@ err:
1012 1049
1013/* Uses Mbox */ 1050/* Uses Mbox */
1014int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 1051int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1015 struct be_queue_info *eq, bool no_delay, int coalesce_wm) 1052 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1016{ 1053{
1017 struct be_mcc_wrb *wrb; 1054 struct be_mcc_wrb *wrb;
1018 struct be_cmd_req_cq_create *req; 1055 struct be_cmd_req_cq_create *req;
@@ -1028,17 +1065,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1028 ctxt = &req->context; 1065 ctxt = &req->context;
1029 1066
1030 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1067 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1031 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); 1068 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1069 NULL);
1032 1070
1033 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1071 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1034 1072
1035 if (BEx_chip(adapter)) { 1073 if (BEx_chip(adapter)) {
1036 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 1074 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1037 coalesce_wm); 1075 coalesce_wm);
1038 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 1076 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1039 ctxt, no_delay); 1077 ctxt, no_delay);
1040 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 1078 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1041 __ilog2_u32(cq->len/256)); 1079 __ilog2_u32(cq->len / 256));
1042 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 1080 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1043 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 1081 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1044 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); 1082 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1091,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1053 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, 1091 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1054 ctxt, coalesce_wm); 1092 ctxt, coalesce_wm);
1055 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1093 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1056 no_delay); 1094 no_delay);
1057 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1095 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1058 __ilog2_u32(cq->len/256)); 1096 __ilog2_u32(cq->len / 256));
1059 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); 1097 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1060 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, 1098 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1061 ctxt, 1); 1099 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1062 AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
1063 ctxt, eq->id);
1064 } 1100 }
1065 1101
1066 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1102 be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)
1088} 1124}
1089 1125
1090static int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1126static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1091 struct be_queue_info *mccq, 1127 struct be_queue_info *mccq,
1092 struct be_queue_info *cq) 1128 struct be_queue_info *cq)
1093{ 1129{
1094 struct be_mcc_wrb *wrb; 1130 struct be_mcc_wrb *wrb;
1095 struct be_cmd_req_mcc_ext_create *req; 1131 struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1141,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1105 ctxt = &req->context; 1141 ctxt = &req->context;
1106 1142
1107 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1108 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); 1144 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1145 NULL);
1109 1146
1110 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1147 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1111 if (BEx_chip(adapter)) { 1148 if (BEx_chip(adapter)) {
1112 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1149 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1113 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1150 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1114 be_encoded_q_len(mccq->len)); 1151 be_encoded_q_len(mccq->len));
1115 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1152 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1116 } else { 1153 } else {
1117 req->hdr.version = 1; 1154 req->hdr.version = 1;
@@ -1145,8 +1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1145} 1182}
1146 1183
1147static int be_cmd_mccq_org_create(struct be_adapter *adapter, 1184static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1148 struct be_queue_info *mccq, 1185 struct be_queue_info *mccq,
1149 struct be_queue_info *cq) 1186 struct be_queue_info *cq)
1150{ 1187{
1151 struct be_mcc_wrb *wrb; 1188 struct be_mcc_wrb *wrb;
1152 struct be_cmd_req_mcc_create *req; 1189 struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1162 ctxt = &req->context; 1199 ctxt = &req->context;
1163 1200
1164 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1201 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1165 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); 1202 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1203 NULL);
1166 1204
1167 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1205 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1168 1206
1169 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1207 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1170 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1208 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1171 be_encoded_q_len(mccq->len)); 1209 be_encoded_q_len(mccq->len));
1172 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1210 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1173 1211
1174 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1212 be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1225,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1187} 1225}
1188 1226
1189int be_cmd_mccq_create(struct be_adapter *adapter, 1227int be_cmd_mccq_create(struct be_adapter *adapter,
1190 struct be_queue_info *mccq, 1228 struct be_queue_info *mccq, struct be_queue_info *cq)
1191 struct be_queue_info *cq)
1192{ 1229{
1193 int status; 1230 int status;
1194 1231
@@ -1213,7 +1250,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1213 1250
1214 req = embedded_payload(&wrb); 1251 req = embedded_payload(&wrb);
1215 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1252 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1216 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); 1253 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1217 1254
1218 if (lancer_chip(adapter)) { 1255 if (lancer_chip(adapter)) {
1219 req->hdr.version = 1; 1256 req->hdr.version = 1;
@@ -1250,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1250 1287
1251/* Uses MCC */ 1288/* Uses MCC */
1252int be_cmd_rxq_create(struct be_adapter *adapter, 1289int be_cmd_rxq_create(struct be_adapter *adapter,
1253 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1290 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1254 u32 if_id, u32 rss, u8 *rss_id) 1291 u32 if_id, u32 rss, u8 *rss_id)
1255{ 1292{
1256 struct be_mcc_wrb *wrb; 1293 struct be_mcc_wrb *wrb;
1257 struct be_cmd_req_eth_rx_create *req; 1294 struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
1268 req = embedded_payload(wrb); 1305 req = embedded_payload(wrb);
1269 1306
1270 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1307 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1271 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1308 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1272 1309
1273 req->cq_id = cpu_to_le16(cq_id); 1310 req->cq_id = cpu_to_le16(cq_id);
1274 req->frag_size = fls(frag_size) - 1; 1311 req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1332,7 @@ err:
1295 * Uses Mbox 1332 * Uses Mbox
1296 */ 1333 */
1297int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1334int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1298 int queue_type) 1335 int queue_type)
1299{ 1336{
1300 struct be_mcc_wrb *wrb; 1337 struct be_mcc_wrb *wrb;
1301 struct be_cmd_req_q_destroy *req; 1338 struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1334 } 1371 }
1335 1372
1336 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1373 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1337 NULL); 1374 NULL);
1338 req->id = cpu_to_le16(q->id); 1375 req->id = cpu_to_le16(q->id);
1339 1376
1340 status = be_mbox_notify_wait(adapter); 1377 status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1361 req = embedded_payload(wrb); 1398 req = embedded_payload(wrb);
1362 1399
1363 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1400 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1364 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1401 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1365 req->id = cpu_to_le16(q->id); 1402 req->id = cpu_to_le16(q->id);
1366 1403
1367 status = be_mcc_notify_wait(adapter); 1404 status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1384 1421
1385 req = embedded_payload(&wrb); 1422 req = embedded_payload(&wrb);
1386 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1423 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1387 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL); 1424 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1425 sizeof(*req), &wrb, NULL);
1388 req->hdr.domain = domain; 1426 req->hdr.domain = domain;
1389 req->capability_flags = cpu_to_le32(cap_flags); 1427 req->capability_flags = cpu_to_le32(cap_flags);
1390 req->enable_flags = cpu_to_le32(en_flags); 1428 req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1422 req = embedded_payload(wrb); 1460 req = embedded_payload(wrb);
1423 1461
1424 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1425 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); 1463 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1464 sizeof(*req), wrb, NULL);
1426 req->hdr.domain = domain; 1465 req->hdr.domain = domain;
1427 req->interface_id = cpu_to_le32(interface_id); 1466 req->interface_id = cpu_to_le32(interface_id);
1428 1467
@@ -1452,7 +1491,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1452 hdr = nonemb_cmd->va; 1491 hdr = nonemb_cmd->va;
1453 1492
1454 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1493 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1455 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); 1494 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1495 nonemb_cmd);
1456 1496
1457	/* BE2 is the only chip that does not support version 1 of the cmd */	1497	/* BE2 is the only chip that does not support version 1 of the cmd */
1458 if (BE2_chip(adapter)) 1498 if (BE2_chip(adapter))
@@ -1472,7 +1512,7 @@ err:
1472 1512
1473/* Lancer Stats */ 1513/* Lancer Stats */
1474int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1514int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1475 struct be_dma_mem *nonemb_cmd) 1515 struct be_dma_mem *nonemb_cmd)
1476{ 1516{
1477 1517
1478 struct be_mcc_wrb *wrb; 1518 struct be_mcc_wrb *wrb;
@@ -1493,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1493 req = nonemb_cmd->va; 1533 req = nonemb_cmd->va;
1494 1534
1495 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1535 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1496 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, 1536 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1497 nonemb_cmd); 1537 wrb, nonemb_cmd);
1498 1538
1499 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); 1539 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1500 req->cmd_params.params.reset_stats = 0; 1540 req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1553 req = embedded_payload(wrb); 1593 req = embedded_payload(wrb);
1554 1594
1555 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1595 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1556 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); 1596 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1597 sizeof(*req), wrb, NULL);
1557 1598
1558	/* BE2 is the only chip that does not support version 1 of the cmd */	1599	/* BE2 is the only chip that does not support version 1 of the cmd */
1559 if (!BE2_chip(adapter)) 1600 if (!BE2_chip(adapter))
@@ -1598,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1598 req = embedded_payload(wrb); 1639 req = embedded_payload(wrb);
1599 1640
1600 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1641 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1601 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), 1642 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1602 wrb, NULL); 1643 sizeof(*req), wrb, NULL);
1603 1644
1604 be_mcc_notify(adapter); 1645 be_mcc_notify(adapter);
1605 1646
@@ -1625,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1625 req = embedded_payload(wrb); 1666 req = embedded_payload(wrb);
1626 1667
1627 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1668 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1628 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); 1669 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1670 NULL);
1629 req->fat_operation = cpu_to_le32(QUERY_FAT); 1671 req->fat_operation = cpu_to_le32(QUERY_FAT);
1630 status = be_mcc_notify_wait(adapter); 1672 status = be_mcc_notify_wait(adapter);
1631 if (!status) { 1673 if (!status) {
@@ -1655,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1655 1697
1656 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1698 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1657 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, 1699 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1658 get_fat_cmd.size, 1700 get_fat_cmd.size,
1659 &get_fat_cmd.dma); 1701 &get_fat_cmd.dma);
1660 if (!get_fat_cmd.va) { 1702 if (!get_fat_cmd.va) {
1661 status = -ENOMEM; 1703 status = -ENOMEM;
1662 dev_err(&adapter->pdev->dev, 1704 dev_err(&adapter->pdev->dev,
@@ -1679,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1679 1721
1680 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; 1722 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1681 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1723 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1682 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb, 1724 OPCODE_COMMON_MANAGE_FAT, payload_len,
1683 &get_fat_cmd); 1725 wrb, &get_fat_cmd);
1684 1726
1685 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); 1727 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1686 req->read_log_offset = cpu_to_le32(log_offset); 1728 req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1691 if (!status) { 1733 if (!status) {
1692 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; 1734 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1693 memcpy(buf + offset, 1735 memcpy(buf + offset,
1694 resp->data_buffer, 1736 resp->data_buffer,
1695 le32_to_cpu(resp->read_log_length)); 1737 le32_to_cpu(resp->read_log_length));
1696 } else { 1738 } else {
1697 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); 1739 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1698 goto err; 1740 goto err;
@@ -1702,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1702 } 1744 }
1703err: 1745err:
1704 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1746 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1705 get_fat_cmd.va, 1747 get_fat_cmd.va, get_fat_cmd.dma);
1706 get_fat_cmd.dma);
1707 spin_unlock_bh(&adapter->mcc_lock); 1748 spin_unlock_bh(&adapter->mcc_lock);
1708} 1749}
1709 1750
1710/* Uses synchronous mcc */ 1751/* Uses synchronous mcc */
1711int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, 1752int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1712 char *fw_on_flash) 1753 char *fw_on_flash)
1713{ 1754{
1714 struct be_mcc_wrb *wrb; 1755 struct be_mcc_wrb *wrb;
1715 struct be_cmd_req_get_fw_version *req; 1756 struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1726 req = embedded_payload(wrb); 1767 req = embedded_payload(wrb);
1727 1768
1728 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1769 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1729 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL); 1770 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1771 NULL);
1730 status = be_mcc_notify_wait(adapter); 1772 status = be_mcc_notify_wait(adapter);
1731 if (!status) { 1773 if (!status) {
1732 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); 1774 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1801,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1759 req = embedded_payload(wrb); 1801 req = embedded_payload(wrb);
1760 1802
1761 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1803 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1762 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); 1804 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1805 NULL);
1763 1806
1764 req->num_eq = cpu_to_le32(num); 1807 req->num_eq = cpu_to_le32(num);
1765 for (i = 0; i < num; i++) { 1808 for (i = 0; i < num; i++) {
@@ -1777,7 +1820,7 @@ err:
1777 1820
1778/* Uses synchronous mcc */	1821/* Uses synchronous mcc */
1779int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1822int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1780 u32 num, bool promiscuous) 1823 u32 num)
1781{ 1824{
1782 struct be_mcc_wrb *wrb; 1825 struct be_mcc_wrb *wrb;
1783 struct be_cmd_req_vlan_config *req; 1826 struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1793 req = embedded_payload(wrb); 1836 req = embedded_payload(wrb);
1794 1837
1795 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1838 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1796 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); 1839 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1840 wrb, NULL);
1797 1841
1798 req->interface_id = if_id; 1842 req->interface_id = if_id;
1799 req->promiscuous = promiscuous;
1800 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1843 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1801 req->num_vlan = num; 1844 req->num_vlan = num;
1802 if (!promiscuous) { 1845 memcpy(req->normal_vlan, vtag_array,
1803 memcpy(req->normal_vlan, vtag_array, 1846 req->num_vlan * sizeof(vtag_array[0]));
1804 req->num_vlan * sizeof(vtag_array[0]));
1805 }
1806 1847
1807 status = be_mcc_notify_wait(adapter); 1848 status = be_mcc_notify_wait(adapter);
1808
1809err: 1849err:
1810 spin_unlock_bh(&adapter->mcc_lock); 1850 spin_unlock_bh(&adapter->mcc_lock);
1811 return status; 1851 return status;
@@ -1827,18 +1867,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1827 } 1867 }
1828 memset(req, 0, sizeof(*req)); 1868 memset(req, 0, sizeof(*req));
1829 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1869 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1830 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), 1870 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1831 wrb, mem); 1871 wrb, mem);
1832 1872
1833 req->if_id = cpu_to_le32(adapter->if_handle); 1873 req->if_id = cpu_to_le32(adapter->if_handle);
1834 if (flags & IFF_PROMISC) { 1874 if (flags & IFF_PROMISC) {
1835 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1875 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1836 BE_IF_FLAGS_VLAN_PROMISCUOUS | 1876 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1837 BE_IF_FLAGS_MCAST_PROMISCUOUS); 1877 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1838 if (value == ON) 1878 if (value == ON)
1839 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1879 req->if_flags =
1840 BE_IF_FLAGS_VLAN_PROMISCUOUS | 1880 cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1841 BE_IF_FLAGS_MCAST_PROMISCUOUS); 1881 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1882 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1842 } else if (flags & IFF_ALLMULTI) { 1883 } else if (flags & IFF_ALLMULTI) {
1843 req->if_flags_mask = req->if_flags = 1884 req->if_flags_mask = req->if_flags =
1844 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1885 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1908,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1867 } 1908 }
1868 1909
1869 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != 1910 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
1870 req->if_flags_mask) { 1911 req->if_flags_mask) {
1871 dev_warn(&adapter->pdev->dev, 1912 dev_warn(&adapter->pdev->dev,
1872 "Cannot set rx filter flags 0x%x\n", 1913 "Cannot set rx filter flags 0x%x\n",
1873 req->if_flags_mask); 1914 req->if_flags_mask);
@@ -1905,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1905 req = embedded_payload(wrb); 1946 req = embedded_payload(wrb);
1906 1947
1907 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1948 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1908 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1949 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1950 wrb, NULL);
1909 1951
1910 req->tx_flow_control = cpu_to_le16((u16)tx_fc); 1952 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1911 req->rx_flow_control = cpu_to_le16((u16)rx_fc); 1953 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1938 req = embedded_payload(wrb); 1980 req = embedded_payload(wrb);
1939 1981
1940 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1982 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1941 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1983 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
1984 wrb, NULL);
1942 1985
1943 status = be_mcc_notify_wait(adapter); 1986 status = be_mcc_notify_wait(adapter);
1944 if (!status) { 1987 if (!status) {
@@ -1968,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1968 req = embedded_payload(wrb); 2011 req = embedded_payload(wrb);
1969 2012
1970 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1971 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); 2014 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2015 sizeof(*req), wrb, NULL);
1972 2016
1973 status = be_mbox_notify_wait(adapter); 2017 status = be_mbox_notify_wait(adapter);
1974 if (!status) { 2018 if (!status) {
@@ -2011,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
2011 req = embedded_payload(wrb); 2055 req = embedded_payload(wrb);
2012 2056
2013 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 2057 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2014 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); 2058 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2059 NULL);
2015 2060
2016 status = be_mbox_notify_wait(adapter); 2061 status = be_mbox_notify_wait(adapter);
2017 2062
@@ -2020,47 +2065,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
2020} 2065}
2021 2066
2022int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 2067int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2023 u32 rss_hash_opts, u16 table_size) 2068 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2024{ 2069{
2025 struct be_mcc_wrb *wrb; 2070 struct be_mcc_wrb *wrb;
2026 struct be_cmd_req_rss_config *req; 2071 struct be_cmd_req_rss_config *req;
2027 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
2028 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
2029 0x3ea83c02, 0x4a110304};
2030 int status; 2072 int status;
2031 2073
2032 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) 2074 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2033 return 0; 2075 return 0;
2034 2076
2035 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2077 spin_lock_bh(&adapter->mcc_lock);
2036 return -1;
2037 2078
2038 wrb = wrb_from_mbox(adapter); 2079 wrb = wrb_from_mccq(adapter);
2080 if (!wrb) {
2081 status = -EBUSY;
2082 goto err;
2083 }
2039 req = embedded_payload(wrb); 2084 req = embedded_payload(wrb);
2040 2085
2041 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2086 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2042 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 2087 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2043 2088
2044 req->if_id = cpu_to_le32(adapter->if_handle); 2089 req->if_id = cpu_to_le32(adapter->if_handle);
2045 req->enable_rss = cpu_to_le16(rss_hash_opts); 2090 req->enable_rss = cpu_to_le16(rss_hash_opts);
2046 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 2091 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2047 2092
2048 if (lancer_chip(adapter) || skyhawk_chip(adapter)) 2093 if (!BEx_chip(adapter))
2049 req->hdr.version = 1; 2094 req->hdr.version = 1;
2050 2095
2051 memcpy(req->cpu_table, rsstable, table_size); 2096 memcpy(req->cpu_table, rsstable, table_size);
2052 memcpy(req->hash, myhash, sizeof(myhash)); 2097 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2053 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 2098 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2054 2099
2055 status = be_mbox_notify_wait(adapter); 2100 status = be_mcc_notify_wait(adapter);
2056 2101err:
2057 mutex_unlock(&adapter->mbox_lock); 2102 spin_unlock_bh(&adapter->mcc_lock);
2058 return status; 2103 return status;
2059} 2104}
2060 2105
2061/* Uses sync mcc */ 2106/* Uses sync mcc */
2062int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2107int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2063 u8 bcn, u8 sts, u8 state) 2108 u8 bcn, u8 sts, u8 state)
2064{ 2109{
2065 struct be_mcc_wrb *wrb; 2110 struct be_mcc_wrb *wrb;
2066 struct be_cmd_req_enable_disable_beacon *req; 2111 struct be_cmd_req_enable_disable_beacon *req;
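
be_cmd_rss_config() changes in three ways in the hunk above: the hard-coded myhash[] key is dropped in favor of a caller-supplied rss_hkey, the version check collapses to !BEx_chip(), and the command moves from the mailbox (interruptible mutex) to the MCC queue (spinlock). A hypothetical caller sketch of how a driver might now program a fresh key; the function name is an invention here and only the command signature comes from the diff:

#include <linux/random.h>
#include <linux/string.h>

/* Hypothetical caller: program a random RSS hash key and remember it,
 * e.g. so it can later be reported back through ethtool.
 */
static int sketch_set_rss_key(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 key[RSS_HASH_KEY_LEN];
	int status;

	get_random_bytes(key, RSS_HASH_KEY_LEN);
	status = be_cmd_rss_config(adapter, rss->rsstable,
				   (u32)rss->rss_flags,
				   RSS_INDIR_TABLE_LEN, key);
	if (!status)
		memcpy(rss->rss_hkey, key, RSS_HASH_KEY_LEN);
	return status;
}
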
@@ -2076,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2076 req = embedded_payload(wrb); 2121 req = embedded_payload(wrb);
2077 2122
2078 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2123 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2079 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); 2124 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2125 sizeof(*req), wrb, NULL);
2080 2126
2081 req->port_num = port_num; 2127 req->port_num = port_num;
2082 req->beacon_state = state; 2128 req->beacon_state = state;
@@ -2107,7 +2153,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2107 req = embedded_payload(wrb); 2153 req = embedded_payload(wrb);
2108 2154
2109 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2155 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2110 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL); 2156 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2157 wrb, NULL);
2111 2158
2112 req->port_num = port_num; 2159 req->port_num = port_num;
2113 2160
@@ -2146,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2146 req = embedded_payload(wrb); 2193 req = embedded_payload(wrb);
2147 2194
2148 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2195 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2149 OPCODE_COMMON_WRITE_OBJECT, 2196 OPCODE_COMMON_WRITE_OBJECT,
2150 sizeof(struct lancer_cmd_req_write_object), wrb, 2197 sizeof(struct lancer_cmd_req_write_object), wrb,
2151 NULL); 2198 NULL);
2152 2199
2153 ctxt = &req->context; 2200 ctxt = &req->context;
2154 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2201 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2155 write_length, ctxt, data_size); 2202 write_length, ctxt, data_size);
2156 2203
2157 if (data_size == 0) 2204 if (data_size == 0)
2158 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2205 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2159 eof, ctxt, 1); 2206 eof, ctxt, 1);
2160 else 2207 else
2161 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2208 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2162 eof, ctxt, 0); 2209 eof, ctxt, 0);
2163 2210
2164 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2211 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2165 req->write_offset = cpu_to_le32(data_offset); 2212 req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2214,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2167 req->descriptor_count = cpu_to_le32(1); 2214 req->descriptor_count = cpu_to_le32(1);
2168 req->buf_len = cpu_to_le32(data_size); 2215 req->buf_len = cpu_to_le32(data_size);
2169 req->addr_low = cpu_to_le32((cmd->dma + 2216 req->addr_low = cpu_to_le32((cmd->dma +
2170 sizeof(struct lancer_cmd_req_write_object)) 2217 sizeof(struct lancer_cmd_req_write_object))
2171 & 0xFFFFFFFF); 2218 & 0xFFFFFFFF);
2172 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2219 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2173 sizeof(struct lancer_cmd_req_write_object))); 2220 sizeof(struct lancer_cmd_req_write_object)));
2174 2221
@@ -2197,8 +2244,8 @@ err_unlock:
2197} 2244}
2198 2245
2199int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2246int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2200 u32 data_size, u32 data_offset, const char *obj_name, 2247 u32 data_size, u32 data_offset, const char *obj_name,
2201 u32 *data_read, u32 *eof, u8 *addn_status) 2248 u32 *data_read, u32 *eof, u8 *addn_status)
2202{ 2249{
2203 struct be_mcc_wrb *wrb; 2250 struct be_mcc_wrb *wrb;
2204 struct lancer_cmd_req_read_object *req; 2251 struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2216 req = embedded_payload(wrb); 2263 req = embedded_payload(wrb);
2217 2264
2218 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2265 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2219 OPCODE_COMMON_READ_OBJECT, 2266 OPCODE_COMMON_READ_OBJECT,
2220 sizeof(struct lancer_cmd_req_read_object), wrb, 2267 sizeof(struct lancer_cmd_req_read_object), wrb,
2221 NULL); 2268 NULL);
2222 2269
2223 req->desired_read_len = cpu_to_le32(data_size); 2270 req->desired_read_len = cpu_to_le32(data_size);
2224 req->read_offset = cpu_to_le32(data_offset); 2271 req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2291,7 @@ err_unlock:
2244} 2291}
2245 2292
2246int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2293int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2247 u32 flash_type, u32 flash_opcode, u32 buf_size) 2294 u32 flash_type, u32 flash_opcode, u32 buf_size)
2248{ 2295{
2249 struct be_mcc_wrb *wrb; 2296 struct be_mcc_wrb *wrb;
2250 struct be_cmd_write_flashrom *req; 2297 struct be_cmd_write_flashrom *req;
@@ -2261,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	req = cmd->va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+			       cmd);
 
 	req->params.op_type = cpu_to_le32(flash_type);
 	req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2284,7 +2332,7 @@ err_unlock:
 }
 
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-			 int offset)
+			 u16 optype, int offset)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_read_flash_crc *req;
@@ -2303,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
 			       wrb, NULL);
 
-	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
+	req->params.op_type = cpu_to_le32(optype);
 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
 	req->params.offset = cpu_to_le32(offset);
 	req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2318,7 +2366,7 @@ err:
 }
 
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
-			struct be_dma_mem *nonemb_cmd)
+			    struct be_dma_mem *nonemb_cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
 	req = nonemb_cmd->va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
-			nonemb_cmd);
+			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+			       wrb, nonemb_cmd);
 	memcpy(req->magic_mac, mac, ETH_ALEN);
 
 	status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
-			NULL);
+			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+			       wrb, NULL);
 
 	req->src_port = port_num;
 	req->dest_port = port_num;
@@ -2378,7 +2426,8 @@ err:
 }
 
 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
-			 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
+			 u64 pattern)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2445,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+			       NULL);
 
 	req->hdr.timeout = cpu_to_le32(15);
 	req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2471,7 @@ err:
 }
 
 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
-		u32 byte_cnt, struct be_dma_mem *cmd)
+			u32 byte_cnt, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
 	}
 	req = cmd->va;
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+			       cmd);
 
 	req->pattern = cpu_to_le64(pattern);
 	req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2516,7 @@ err:
 }
 
 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
-			struct be_dma_mem *nonemb_cmd)
+			    struct be_dma_mem *nonemb_cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 	req = nonemb_cmd->va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
-			nonemb_cmd);
+			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+			       nonemb_cmd);
 
 	status = be_mcc_notify_wait(adapter);
 
@@ -2510,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 		goto err;
 	}
 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-					&cmd.dma);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -2521,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 	req = cmd.va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
-			wrb, &cmd);
+			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+			       wrb, &cmd);
 
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
@@ -2544,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 					 BE_SUPPORTED_SPEED_1GBPS;
 		}
 	}
-	pci_free_consistent(adapter->pdev, cmd.size,
-				cmd.va, cmd.dma);
+	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -2568,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
 
 	req->hdr.domain = domain;
 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
 	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-						&attribs_cmd.dma);
+					      &attribs_cmd.dma);
 	if (!attribs_cmd.va) {
-		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure\n");
+		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
 		goto err;
 	}
@@ -2613,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 	req = attribs_cmd.va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
-			&attribs_cmd);
+			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+			       wrb, &attribs_cmd);
 
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
@@ -2649,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+			       sizeof(*req), wrb, NULL);
 
 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
 				CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
 	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-			get_mac_list_cmd.size,
-			&get_mac_list_cmd.dma);
+						   get_mac_list_cmd.size,
+						   &get_mac_list_cmd.dma);
 
 	if (!get_mac_list_cmd.va) {
 		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure during GET_MAC_LIST\n");
+			"Memory allocation failure during GET_MAC_LIST\n");
 		return -ENOMEM;
 	}
 
@@ -2831,18 +2880,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 		/* If no active mac_id found, return first mac addr */
 		*pmac_id_valid = false;
 		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
-				ETH_ALEN);
+		       ETH_ALEN);
 	}
 
 out:
 	spin_unlock_bh(&adapter->mcc_lock);
 	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-			get_mac_list_cmd.va, get_mac_list_cmd.dma);
+			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
 	return status;
 }
 
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
-			  u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+			  u8 *mac, u32 if_handle, bool active, u32 domain)
 {
 
 	if (!active)
@@ -2892,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-			&cmd.dma, GFP_KERNEL);
+				    &cmd.dma, GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -2906,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
 	req = cmd.va;
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
-				wrb, &cmd);
+			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+			       wrb, &cmd);
 
 	req->hdr.domain = domain;
 	req->mac_count = mac_count;
@@ -2917,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 	status = be_mcc_notify_wait(adapter);
 
 err:
-	dma_free_coherent(&adapter->pdev->dev, cmd.size,
-				cmd.va, cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -2963,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
 	ctxt = &req->context;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+			       NULL);
 
 	req->hdr.domain = domain;
 	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3058,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 	ctxt = &req->context;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+			       NULL);
 
 	req->hdr.domain = domain;
 	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 	if (!status) {
 		struct be_cmd_resp_get_hsw_config *resp =
 						embedded_payload(wrb);
-		be_dws_le_to_cpu(&resp->context,
-				 sizeof(resp->context));
+		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
-				pvid, &resp->context);
+				    pvid, &resp->context);
 		if (pvid)
 			*pvid = le16_to_cpu(vid);
 		if (mode)
@@ -3062,11 +3111,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-					       &cmd.dma);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
 	if (!cmd.va) {
-		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure\n");
+		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
 		goto err;
 	}
@@ -3349,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-				      &cmd.dma);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -3396,7 +3442,7 @@ err:
 
 /* Uses mbox */
 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-					u8 domain, struct be_dma_mem *cmd)
+					  u8 domain, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
 
 /* Uses sync mcc */
 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-					u8 domain, struct be_dma_mem *cmd)
+					  u8 domain, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3530,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 	resp = cmd.va;
 	desc_count = le32_to_cpu(resp->desc_count);
 
-	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
-			desc_count);
+	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+				desc_count);
 	if (pcie)
 		res->max_vfs = le16_to_cpu(pcie->num_vfs);
 
@@ -3548,33 +3594,47 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
 	nic->cq_count = 0xFFFF;
 	nic->toe_conn_count = 0xFFFF;
 	nic->eq_count = 0xFFFF;
+	nic->iface_count = 0xFFFF;
 	nic->link_param = 0xFF;
+	nic->channel_id_param = cpu_to_le16(0xF000);
 	nic->acpi_params = 0xFF;
 	nic->wol_param = 0x0F;
-	nic->bw_min = 0xFFFFFFFF;
+	nic->tunnel_iface_count = 0xFFFF;
+	nic->direct_tenant_iface_count = 0xFFFF;
 	nic->bw_max = 0xFFFFFFFF;
 }
 
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
+		      u8 domain)
 {
-	if (lancer_chip(adapter)) {
-		struct be_nic_res_desc nic_desc;
+	struct be_nic_res_desc nic_desc;
+	u32 bw_percent;
+	u16 version = 0;
+
+	if (BE3_chip(adapter))
+		return be_cmd_set_qos(adapter, max_rate / 10, domain);
 
-		be_reset_nic_desc(&nic_desc);
+	be_reset_nic_desc(&nic_desc);
+	nic_desc.pf_num = adapter->pf_number;
+	nic_desc.vf_num = domain;
+	if (lancer_chip(adapter)) {
 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
 		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
 					(1 << NOSV_SHIFT);
-		nic_desc.pf_num = adapter->pf_number;
-		nic_desc.vf_num = domain;
-		nic_desc.bw_max = cpu_to_le32(bps);
-
-		return be_cmd_set_profile_config(adapter, &nic_desc,
-						 RESOURCE_DESC_SIZE_V0,
-						 0, domain);
+		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
 	} else {
-		return be_cmd_set_qos(adapter, bps, domain);
+		version = 1;
+		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+		nic_desc.bw_max = cpu_to_le32(bw_percent);
 	}
+
+	return be_cmd_set_profile_config(adapter, &nic_desc,
+					 nic_desc.hdr.desc_len,
+					 version, domain);
 }
 
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3859,7 +3919,7 @@ err:
 }
 
 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
-			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
 {
 	struct be_adapter *adapter = netdev_priv(netdev_handle);
 	struct be_mcc_wrb *wrb;
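
Note on the QoS rework above: be_cmd_config_qos() now takes an absolute rate plus the link speed and, for the V1 (non-BE3, non-Lancer) descriptor, converts that rate into a percentage of link speed, while BE3 and Lancer continue to take the rate in 10 Mbps units. A minimal sketch of the conversion, with illustrative values that are not from the patch:

	/* sketch only: mirrors the bw_percent math in be_cmd_config_qos() */
	u32 max_rate = 3000;	/* requested VF cap in Mbps (assumed value) */
	u16 link_speed = 10000;	/* 10G port speed in Mbps (assumed value) */
	u32 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
	/* bw_percent == 30 here; a max_rate of 0 means "no cap" (100%) */
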
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d53c1c9..3e0a6b243806 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -50,7 +50,7 @@ struct be_mcc_wrb {
 #define CQE_FLAGS_CONSUMED_MASK		(1 << 27)
 
 /* Completion Status */
-enum {
+enum mcc_base_status {
 	MCC_STATUS_SUCCESS = 0,
 	MCC_STATUS_FAILED = 1,
 	MCC_STATUS_ILLEGAL_REQUEST = 2,
@@ -60,12 +60,25 @@
 	MCC_STATUS_NOT_SUPPORTED = 66
 };
 
-#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES	0x16
+/* Additional status */
+enum mcc_addl_status {
+	MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
+	MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
+	MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+};
+
+#define CQE_BASE_STATUS_MASK		0xFFFF
+#define CQE_BASE_STATUS_SHIFT		0	/* bits 0 - 15 */
+#define CQE_ADDL_STATUS_MASK		0xFF
+#define CQE_ADDL_STATUS_SHIFT		16	/* bits 16 - 31 */
 
-#define CQE_STATUS_COMPL_MASK		0xFFFF
-#define CQE_STATUS_COMPL_SHIFT		0	/* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK		0xFFFF
-#define CQE_STATUS_EXTD_SHIFT		16	/* bits 16 - 31 */
+#define base_status(status)		\
+	((enum mcc_base_status)		\
+	 (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
+#define addl_status(status)		\
+	((enum mcc_addl_status)		\
+	 (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) &	\
+			CQE_ADDL_STATUS_MASK : 0))
 
 struct be_mcc_compl {
 	u32 status;		/* dword 0 */
@@ -74,13 +87,13 @@ struct be_mcc_compl {
 	u32 flags;		/* dword 3 */
 };
 
-/* When the async bit of mcc_compl is set, the last 4 bytes of
- * mcc_compl is interpreted as follows:
+/* When the async bit of mcc_compl flags is set, flags
+ * is interpreted as follows:
  */
-#define ASYNC_TRAILER_EVENT_CODE_SHIFT	8	/* bits 8 - 15 */
-#define ASYNC_TRAILER_EVENT_CODE_MASK	0xFF
-#define ASYNC_TRAILER_EVENT_TYPE_SHIFT	16
-#define ASYNC_TRAILER_EVENT_TYPE_MASK	0xFF
+#define ASYNC_EVENT_CODE_SHIFT		8	/* bits 8 - 15 */
+#define ASYNC_EVENT_CODE_MASK		0xFF
+#define ASYNC_EVENT_TYPE_SHIFT		16
+#define ASYNC_EVENT_TYPE_MASK		0xFF
 #define ASYNC_EVENT_CODE_LINK_STATE	0x1
 #define ASYNC_EVENT_CODE_GRP_5		0x5
 #define ASYNC_EVENT_QOS_SPEED		0x1
@@ -89,10 +102,6 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_CODE_QNQ		0x6
 #define ASYNC_DEBUG_EVENT_TYPE_QNQ	1
 
-struct be_async_event_trailer {
-	u32 code;
-};
-
 enum {
 	LINK_DOWN	= 0x0,
 	LINK_UP		= 0x1
@@ -100,7 +109,7 @@ enum {
 #define LINK_STATUS_MASK			0x1
 #define LOGICAL_LINK_STATUS_MASK	0x2
 
-/* When the event code of an async trailer is link-state, the mcc_compl
+/* When the event code of compl->flags is link-state, the mcc_compl
  * must be interpreted as follows
  */
 struct be_async_event_link_state {
@@ -110,10 +119,10 @@ struct be_async_event_link_state {
 	u8 port_speed;
 	u8 port_fault;
 	u8 rsvd0[7];
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
+/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
  * the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_qos_link_speed {
@@ -121,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {
 	u8 rsvd[5];
 	u16 qos_link_speed;
 	u32 event_tag;
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
  * CoS-Priority, the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_cos_priority {
@@ -134,10 +143,10 @@ struct be_async_event_grp5_cos_priority {
 	u8 valid;
 	u8 rsvd0;
 	u8 event_tag;
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
  * PVID state, the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_pvid_state {
@@ -146,7 +155,7 @@ struct be_async_event_grp5_pvid_state {
 	u16 tag;
 	u32 event_tag;
 	u32 rsvd1;
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
 /* async event indicating outer VLAN tag in QnQ */
@@ -156,7 +165,7 @@ struct be_async_event_qnq {
 	u16 vlan_tag;
 	u32 event_tag;
 	u8 rsvd1[4];
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
 struct be_mcc_mailbox {
@@ -258,8 +267,8 @@ struct be_cmd_resp_hdr {
 	u8 opcode;		/* dword 0 */
 	u8 subsystem;		/* dword 0 */
 	u8 rsvd[2];		/* dword 0 */
-	u8 status;		/* dword 1 */
-	u8 add_status;		/* dword 1 */
+	u8 base_status;		/* dword 1 */
+	u8 addl_status;		/* dword 1 */
 	u8 rsvd1[2];		/* dword 1 */
 	u32 response_length;	/* dword 2 */
 	u32 actual_resp_len;	/* dword 3 */
@@ -1186,7 +1195,8 @@ struct be_cmd_read_flash_crc {
 	struct flashrom_params params;
 	u8 crc[4];
 	u8 rsvd[4];
-};
+} __packed;
+
 /**************** Lancer Firmware Flash ************/
 struct amap_lancer_write_obj_context {
 	u8 write_length[24];
@@ -1891,16 +1901,20 @@ struct be_nic_res_desc {
 	u16 cq_count;
 	u16 toe_conn_count;
 	u16 eq_count;
-	u32 rsvd5;
+	u16 vlan_id;
+	u16 iface_count;
 	u32 cap_flags;
 	u8 link_param;
-	u8 rsvd6[3];
+	u8 rsvd6;
+	u16 channel_id_param;
 	u32 bw_min;
 	u32 bw_max;
 	u8 acpi_params;
 	u8 wol_param;
 	u16 rsvd7;
-	u32 rsvd8[7];
+	u16 tunnel_iface_count;
+	u16 direct_tenant_iface_count;
+	u32 rsvd8[6];
 } __packed;
 
 /************ Multi-Channel type ***********/
@@ -2060,7 +2074,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
 		      char *fw_on_flash);
 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-		       u32 num, bool promiscuous);
+		       u32 num);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2082,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 			u32 *function_mode, u32 *function_caps, u16 *asic_rev);
 int be_cmd_reset_function(struct be_adapter *adapter);
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-		      u32 rss_hash_opts, u16 table_size);
+		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
 int be_process_mcc(struct be_adapter *adapter);
 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
 			    u8 status, u8 state);
@@ -2084,7 +2098,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			   u32 data_size, u32 data_offset, const char *obj_name,
 			   u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-			 int offset);
+			 u16 optype, int offset);
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
 			    struct be_dma_mem *nonemb_cmd);
 int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2101,7 +2115,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 			u8 loopback_type, u8 enable);
 int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
+		      u16 link_speed, u8 domain);
 void be_detect_error(struct be_adapter *adapter);
 int be_cmd_get_die_temperature(struct be_adapter *adapter);
 int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
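
Note on the status rework above: a completion status word now decomposes through base_status()/addl_status() instead of the old CQE_STATUS_COMPL/EXTD masks, with the base status in bits 0-15 and the additional status in bits 16-31. A minimal sketch of a consumer, using only names defined in this header (the retry comment is illustrative, not from the patch):

	u32 status = compl->status;	/* dword 0 of a struct be_mcc_compl */

	if (base_status(status) == MCC_STATUS_FAILED &&
	    addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) {
		/* e.g. scale down the resource request and retry */
	}
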
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96cba65d..e2da4d20dd3d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
 	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
 	{DRVSTAT_RX_INFO(rx_compl)},
+	{DRVSTAT_RX_INFO(rx_compl_err)},
 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
 	/* Number of page allocation failures while posting receive buffers
 	 * to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
 #define BE_NO_LOOPBACK 0xff
 
 static void be_get_drvinfo(struct net_device *netdev,
-				struct ethtool_drvinfo *drvinfo)
+			   struct ethtool_drvinfo *drvinfo)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
 	drvinfo->eedump_len = 0;
 }
 
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
 {
 	u32 data_read = 0, eof;
 	u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
 	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
 	/* data_offset and data_size should be 0 to get reg len */
 	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
-			file_name, &data_read, &eof, &addn_status);
+					file_name, &data_read, &eof,
+					&addn_status);
 
 	return data_read;
 }
 
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
-		u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+				u32 buf_len, void *buf)
 {
 	struct be_dma_mem read_cmd;
 	u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 
 	read_cmd.size = LANCER_READ_FILE_CHUNK;
 	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-			&read_cmd.dma);
+					   &read_cmd.dma);
 
 	if (!read_cmd.va) {
 		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure while reading dump\n");
+			"Memory allocation failure while reading dump\n");
 		return -ENOMEM;
 	}
 
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 					LANCER_READ_FILE_CHUNK);
 		chunk_size = ALIGN(chunk_size, 4);
 		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
-				total_read_len, file_name, &read_len,
-				&eof, &addn_status);
+						total_read_len, file_name,
+						&read_len, &eof, &addn_status);
 		if (!status) {
 			memcpy(buf + total_read_len, read_cmd.va, read_len);
 			total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 		}
 	}
 	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-			read_cmd.dma);
+			    read_cmd.dma);
 
 	return status;
 }
 
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
 	if (be_physfn(adapter)) {
 		if (lancer_chip(adapter))
 			log_size = lancer_cmd_get_file_len(adapter,
-					LANCER_FW_DUMP_FILE);
+							   LANCER_FW_DUMP_FILE);
 		else
 			be_cmd_get_reg_len(adapter, &log_size);
 	}
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
 	memset(buf, 0, regs->len);
 	if (lancer_chip(adapter))
 		lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
-				regs->len, buf);
+				     regs->len, buf);
 	else
 		be_cmd_get_regs(adapter, regs->len, buf);
 	}
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
 	return 0;
 }
 
-static void
-be_get_ethtool_stats(struct net_device *netdev,
-		struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+				 struct ethtool_stats *stats, uint64_t *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
 	}
 }
 
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
-		uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+				uint8_t *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
 	adapter->rx_fc = ecmd->rx_pause;
 
 	status = be_cmd_set_flow_control(adapter,
-					adapter->tx_fc, adapter->rx_fc);
+					 adapter->tx_fc, adapter->rx_fc);
 	if (status)
 		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
 
 	return status;
 }
 
-static int
-be_set_phys_id(struct net_device *netdev,
-		enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+			  enum ethtool_phys_id_state state)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
 	return status;
 }
 
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
 {
 	int ret, i;
 	struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
 
 	for (i = 0; i < 2; i++) {
 		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
-					4096, &ddrdma_cmd);
+					  4096, &ddrdma_cmd);
 		if (ret != 0)
 			goto err;
 	}
@@ -773,20 +766,17 @@ err:
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
-			u64 *status)
+			    u64 *status)
 {
-	be_cmd_set_loopback(adapter, adapter->hba_port_num,
-				loopback_type, 1);
+	be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
-				loopback_type, 1500,
-				2, 0xabc);
-	be_cmd_set_loopback(adapter, adapter->hba_port_num,
-				BE_NO_LOOPBACK, 1);
+				       loopback_type, 1500, 2, 0xabc);
+	be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
 	return *status;
 }
 
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+			 u64 *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
-		if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
-				     &data[0]) != 0)
+		if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
 			test->flags |= ETH_TEST_FL_FAILED;
 
-		if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
-				     &data[1]) != 0)
+		if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
 			test->flags |= ETH_TEST_FL_FAILED;
 
 		if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 	}
 }
 
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	return be_load_fw(adapter, efl->data);
 }
 
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
 	if (lancer_chip(adapter)) {
 		if (be_physfn(adapter))
 			return lancer_cmd_get_file_len(adapter,
-					LANCER_VPD_PF_FILE);
+						       LANCER_VPD_PF_FILE);
 		else
 			return lancer_cmd_get_file_len(adapter,
-					LANCER_VPD_VF_FILE);
+						       LANCER_VPD_VF_FILE);
 	} else {
 		return BE_READ_SEEPROM_LEN;
 	}
 }
 
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
-			uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+			  struct ethtool_eeprom *eeprom, uint8_t *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	if (lancer_chip(adapter)) {
 		if (be_physfn(adapter))
 			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
-					eeprom->len, data);
+						    eeprom->len, data);
 		else
 			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
-					eeprom->len, data);
+						    eeprom->len, data);
 	}
 
 	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 
 	switch (flow_type) {
 	case TCP_V4_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V4_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case TCP_V6_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V6_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	}
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 }
 
 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
-		      u32 *rule_locs)
+			u32 *rule_locs)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
 	struct be_rx_obj *rxo;
 	int status = 0, i, j;
 	u8 rsstable[128];
-	u32 rss_flags = adapter->rss_flags;
+	u32 rss_flags = adapter->rss_info.rss_flags;
 
 	if (cmd->data != L3_RSS_FLAGS &&
 	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
 		return -EINVAL;
 	}
 
-	if (rss_flags == adapter->rss_flags)
+	if (rss_flags == adapter->rss_info.rss_flags)
 		return status;
 
 	if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
 			}
 		}
 	}
-	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+	status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+				   rss_flags, 128, adapter->rss_info.rss_hkey);
 	if (!status)
-		adapter->rss_flags = rss_flags;
+		adapter->rss_info.rss_flags = rss_flags;
 
 	return status;
 }
@@ -1103,6 +1090,69 @@ static int be_set_channels(struct net_device *netdev,
 	return be_update_queues(adapter);
 }
 
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+	return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+	return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int i;
+	struct rss_info *rss = &adapter->rss_info;
+
+	if (indir) {
+		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+			indir[i] = rss->rss_queue[i];
+	}
+
+	if (hkey)
+		memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+	return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
+		       const u8 *hkey)
+{
+	int rc = 0, i, j;
+	struct be_adapter *adapter = netdev_priv(netdev);
+	u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+	if (indir) {
+		struct be_rx_obj *rxo;
+		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+			j = indir[i];
+			rxo = &adapter->rx_obj[j];
+			rsstable[i] = rxo->rss_id;
+			adapter->rss_info.rss_queue[i] = j;
+		}
+	} else {
+		memcpy(rsstable, adapter->rss_info.rsstable,
+		       RSS_INDIR_TABLE_LEN);
+	}
+
+	if (!hkey)
+		hkey = adapter->rss_info.rss_hkey;
+
+	rc = be_cmd_rss_config(adapter, rsstable,
+			       adapter->rss_info.rss_flags,
+			       RSS_INDIR_TABLE_LEN, hkey);
+	if (rc) {
+		adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+		return -EIO;
+	}
+	memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+	memcpy(adapter->rss_info.rsstable, rsstable,
+	       RSS_INDIR_TABLE_LEN);
+	return 0;
+}
+
 const struct ethtool_ops be_ethtool_ops = {
 	.get_settings = be_get_settings,
 	.get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1179,10 @@ const struct ethtool_ops be_ethtool_ops = {
 	.self_test = be_self_test,
 	.get_rxnfc = be_get_rxnfc,
 	.set_rxnfc = be_set_rxnfc,
+	.get_rxfh_indir_size = be_get_rxfh_indir_size,
+	.get_rxfh_key_size = be_get_rxfh_key_size,
+	.get_rxfh = be_get_rxfh,
+	.set_rxfh = be_set_rxfh,
 	.get_channels = be_get_channels,
 	.set_channels = be_set_channels
 };
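
Note on the ethtool additions above: the get/set_rxfh hooks expose the RSS indirection table and hash key, and be_set_rxfh() keeps the stored key when none is supplied. A minimal sketch of the calling convention (these are static ops normally reached through the ethtool core, so the direct calls here are illustrative only):

	u32 indir[RSS_INDIR_TABLE_LEN];
	u8 hkey[RSS_HASH_KEY_LEN];
	int i;

	be_get_rxfh(netdev, indir, hkey);	/* read current table and key */

	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
		indir[i] = 0;			/* steer every bucket at ring 0 */
	be_set_rxfh(netdev, indir, NULL);	/* NULL key: reuse the stored hkey */
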
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3bd198550edb..8840c64aaeca 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -188,10 +188,14 @@
 #define OPTYPE_FCOE_FW_ACTIVE		10
 #define OPTYPE_FCOE_FW_BACKUP		11
 #define OPTYPE_NCSI_FW			13
+#define OPTYPE_REDBOOT_DIR		18
+#define OPTYPE_REDBOOT_CONFIG		19
+#define OPTYPE_SH_PHY_FW		21
+#define OPTYPE_FLASHISM_JUMPVECTOR	22
+#define OPTYPE_UFI_DIR			23
 #define OPTYPE_PHY_FW			99
 #define TN_8022				13
 
-#define ILLEGAL_IOCTL_REQ		2
 #define FLASHROM_OPER_PHY_FLASH		9
 #define FLASHROM_OPER_PHY_SAVE		10
 #define FLASHROM_OPER_FLASH		1
@@ -250,6 +254,9 @@
 #define IMAGE_FIRMWARE_BACKUP_FCoE	178
 #define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
 #define IMAGE_FIRMWARE_PHY		192
+#define IMAGE_REDBOOT_DIR		208
+#define IMAGE_REDBOOT_CONFIG		209
+#define IMAGE_UFI_DIR			210
 #define IMAGE_BOOT_CODE			224
 
 /************* Rx Packet Type Encoding **************/
@@ -534,7 +541,8 @@ struct flash_section_entry {
 	u32 image_size;
 	u32 cksum;
 	u32 entry_point;
-	u32 rsvd0;
+	u16 optype;
+	u16 rsvd0;
 	u32 rsvd1;
 	u8 ver_data[32];
 } __packed;
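
Note on the flash layout change above: flash_section_entry now carries a per-section optype in what used to be the low half of rsvd0, so flashing code can take the operation type from the section table itself rather than mapping IMAGE_* constants. A minimal sketch of reading it (the helper name is illustrative, and the little-endian conversion is an assumption based on how other fields of this on-flash table are consumed):

	static u16 fsec_entry_optype(const struct flash_section_entry *entry)
	{
		return le16_to_cpu(entry->optype);	/* assumed __le16 on flash */
	}
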
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dc19bc5dec77..6822b3d76d85 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
134} 134}
135 135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, 136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
137 u16 len, u16 entry_size) 137 u16 len, u16 entry_size)
138{ 138{
139 struct be_dma_mem *mem = &q->dma_mem; 139 struct be_dma_mem *mem = &q->dma_mem;
140 140
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
154 u32 reg, enabled; 154 u32 reg, enabled;
155 155
156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, 156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157 &reg); 157 &reg);
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159 159
160 if (!enabled && enable) 160 if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
165 return; 165 return;
166 166
167 pci_write_config_dword(adapter->pdev, 167 pci_write_config_dword(adapter->pdev,
168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); 168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
169} 169}
170 170
171static void be_intr_set(struct be_adapter *adapter, bool enable) 171static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
206} 206}
207 207
208static void be_eq_notify(struct be_adapter *adapter, u16 qid, 208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
209 bool arm, bool clear_int, u16 num_popped) 209 bool arm, bool clear_int, u16 num_popped)
210{ 210{
211 u32 val = 0; 211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK; 212 val |= qid & DB_EQ_RING_ID_MASK;
213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << 213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
214 DB_EQ_RING_ID_EXT_MASK_SHIFT);
215 214
216 if (adapter->eeh_error) 215 if (adapter->eeh_error)
217 return; 216 return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
477 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; 476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
478 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; 477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
479 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; 478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
480 if (be_roce_supported(adapter)) { 479 if (be_roce_supported(adapter)) {
481 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; 480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
482 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; 481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
483 drvs->rx_roce_frames = port_stats->roce_frames_received; 482 drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
491{ 490{
492 491
493 struct be_drv_stats *drvs = &adapter->drv_stats; 492 struct be_drv_stats *drvs = &adapter->drv_stats;
494 struct lancer_pport_stats *pport_stats = 493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
495 pport_stats_from_cmd(adapter);
496 494
497 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats)); 495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
498 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; 496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
539} 537}
540 538
541static void populate_erx_stats(struct be_adapter *adapter, 539static void populate_erx_stats(struct be_adapter *adapter,
542 struct be_rx_obj *rxo, 540 struct be_rx_obj *rxo, u32 erx_stat)
543 u32 erx_stat)
544{ 541{
545 if (!BEx_chip(adapter)) 542 if (!BEx_chip(adapter))
546 rx_stats(rxo)->rx_drops_no_frags = erx_stat; 543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
579} 576}
580 577
581static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
582 struct rtnl_link_stats64 *stats) 579 struct rtnl_link_stats64 *stats)
583{ 580{
584 struct be_adapter *adapter = netdev_priv(netdev); 581 struct be_adapter *adapter = netdev_priv(netdev);
585 struct be_drv_stats *drvs = &adapter->drv_stats; 582 struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
660} 657}
661 658
662static void be_tx_stats_update(struct be_tx_obj *txo, 659static void be_tx_stats_update(struct be_tx_obj *txo,
-			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+			       u32 wrb_cnt, u32 copied, u32 gso_segs,
+			       bool stopped)
664{ 662{
665 struct be_tx_stats *stats = tx_stats(txo); 663 struct be_tx_stats *stats = tx_stats(txo);
666 664
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
676 674
677/* Determine number of WRB entries needed to xmit data in an skb */ 675/* Determine number of WRB entries needed to xmit data in an skb */
678static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, 676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
679 bool *dummy) 677 bool *dummy)
680{ 678{
681 int cnt = (skb->len > skb->data_len); 679 int cnt = (skb->len > skb->data_len);
682 680
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
704} 702}
705 703
706static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, 704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
707 struct sk_buff *skb) 705 struct sk_buff *skb)
708{ 706{
709 u8 vlan_prio; 707 u8 vlan_prio;
710 u16 vlan_tag; 708 u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
733} 731}
734 732
735static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 733static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
-			 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
+			 bool skip_hw_vlan)
737{ 736{
738 u16 vlan_tag, proto; 737 u16 vlan_tag, proto;
739 738
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
774} 773}
775 774
776static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
777 bool unmap_single) 776 bool unmap_single)
778{ 777{
779 dma_addr_t dma; 778 dma_addr_t dma;
780 779
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
791} 790}
792 791
793static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 792static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
794 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, 793 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
795 bool skip_hw_vlan) 794 bool skip_hw_vlan)
796{ 795{
797 dma_addr_t busaddr; 796 dma_addr_t busaddr;
798 int i, copied = 0; 797 int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
821 } 820 }
822 821
823 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		const struct skb_frag_struct *frag =
-			&skb_shinfo(skb)->frags[i];
+		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
826 busaddr = skb_frag_dma_map(dev, frag, 0, 824 busaddr = skb_frag_dma_map(dev, frag, 0,
827 skb_frag_size(frag), DMA_TO_DEVICE); 825 skb_frag_size(frag), DMA_TO_DEVICE);
828 if (dma_mapping_error(dev, busaddr)) 826 if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
927 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
928} 926}
929 927
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
-				struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
932{ 929{
933 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); 930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
934} 931}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
959 */ 956 */
960 if (be_pvid_tagging_enabled(adapter) && 957 if (be_pvid_tagging_enabled(adapter) &&
961 veh->h_vlan_proto == htons(ETH_P_8021Q)) 958 veh->h_vlan_proto == htons(ETH_P_8021Q))
962 *skip_hw_vlan = true; 959 *skip_hw_vlan = true;
963 960
964 /* HW has a bug wherein it will calculate CSUM for VLAN 961 /* HW has a bug wherein it will calculate CSUM for VLAN
965 * pkts even though it is disabled. 962 * pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1077{ 1074{
1078 struct be_adapter *adapter = netdev_priv(netdev); 1075 struct be_adapter *adapter = netdev_priv(netdev);
1079 if (new_mtu < BE_MIN_MTU || 1076 if (new_mtu < BE_MIN_MTU ||
-	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
-			(ETH_HLEN + ETH_FCS_LEN))) {
+	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
1082 dev_info(&adapter->pdev->dev, 1078 dev_info(&adapter->pdev->dev,
1083 "MTU must be between %d and %d bytes\n", 1079 "MTU must be between %d and %d bytes\n",
1084 BE_MIN_MTU, 1080 BE_MIN_MTU,
1085 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); 1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1086 return -EINVAL; 1082 return -EINVAL;
1087 } 1083 }
1088 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", 1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1089 netdev->mtu, new_mtu); 1085 netdev->mtu, new_mtu);
1090 netdev->mtu = new_mtu; 1086 netdev->mtu = new_mtu;
1091 return 0; 1087 return 0;
1092} 1088}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1098static int be_vid_config(struct be_adapter *adapter) 1094static int be_vid_config(struct be_adapter *adapter)
1099{ 1095{
1100 u16 vids[BE_NUM_VLANS_SUPPORTED]; 1096 u16 vids[BE_NUM_VLANS_SUPPORTED];
-	u16 num = 0, i;
+	u16 num = 0, i = 0;
1102 int status = 0; 1098 int status = 0;
1103 1099
1104 /* No need to further configure vids if in promiscuous mode */ 1100 /* No need to further configure vids if in promiscuous mode */
@@ -1109,16 +1105,14 @@ static int be_vid_config(struct be_adapter *adapter)
1109 goto set_vlan_promisc; 1105 goto set_vlan_promisc;
1110 1106
1111 /* Construct VLAN Table to give to HW */ 1107 /* Construct VLAN Table to give to HW */
-	for (i = 0; i < VLAN_N_VID; i++)
-		if (adapter->vlan_tag[i])
-			vids[num++] = cpu_to_le16(i);
-
-	status = be_cmd_vlan_config(adapter, adapter->if_handle,
-				    vids, num, 0);
+	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+		vids[num++] = cpu_to_le16(i);
 
+	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
1119 if (status) { 1112 if (status) {
1120 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1113 /* Set to VLAN promisc mode as setting VLAN filter failed */
-		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+		if (addl_status(status) ==
+		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1122 goto set_vlan_promisc; 1116 goto set_vlan_promisc;
1123 dev_err(&adapter->pdev->dev, 1117 dev_err(&adapter->pdev->dev,
1124 "Setting HW VLAN filtering failed.\n"); 1118 "Setting HW VLAN filtering failed.\n");
@@ -1160,16 +1154,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1160 if (lancer_chip(adapter) && vid == 0) 1154 if (lancer_chip(adapter) && vid == 0)
1161 return status; 1155 return status;
1162 1156
-	if (adapter->vlan_tag[vid])
+	if (test_bit(vid, adapter->vids))
1164 return status; 1158 return status;
1165 1159
-	adapter->vlan_tag[vid] = 1;
+	set_bit(vid, adapter->vids);
1167 adapter->vlans_added++; 1161 adapter->vlans_added++;
1168 1162
1169 status = be_vid_config(adapter); 1163 status = be_vid_config(adapter);
1170 if (status) { 1164 if (status) {
1171 adapter->vlans_added--; 1165 adapter->vlans_added--;
-		adapter->vlan_tag[vid] = 0;
+		clear_bit(vid, adapter->vids);
1173 } 1167 }
1174 1168
1175 return status; 1169 return status;
@@ -1184,12 +1178,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1184 if (lancer_chip(adapter) && vid == 0) 1178 if (lancer_chip(adapter) && vid == 0)
1185 goto ret; 1179 goto ret;
1186 1180
-	adapter->vlan_tag[vid] = 0;
+	clear_bit(vid, adapter->vids);
1188 status = be_vid_config(adapter); 1182 status = be_vid_config(adapter);
1189 if (!status) 1183 if (!status)
1190 adapter->vlans_added--; 1184 adapter->vlans_added--;
1191 else 1185 else
-		adapter->vlan_tag[vid] = 1;
+		set_bit(vid, adapter->vids);
1193ret: 1187ret:
1194 return status; 1188 return status;
1195} 1189}
@@ -1197,7 +1191,7 @@ ret:
1197static void be_clear_promisc(struct be_adapter *adapter) 1191static void be_clear_promisc(struct be_adapter *adapter)
1198{ 1192{
1199 adapter->promiscuous = false; 1193 adapter->promiscuous = false;
-	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
1201 1195
1202 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 1196 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1203} 1197}
@@ -1222,10 +1216,8 @@ static void be_set_rx_mode(struct net_device *netdev)
1222 1216
1223 /* Enable multicast promisc if num configured exceeds what we support */ 1217 /* Enable multicast promisc if num configured exceeds what we support */
1224 if (netdev->flags & IFF_ALLMULTI || 1218 if (netdev->flags & IFF_ALLMULTI ||
-	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
-		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
-		goto done;
-	}
+	    netdev_mc_count(netdev) > be_max_mc(adapter))
+		goto set_mcast_promisc;
1229 1221
1230 if (netdev_uc_count(netdev) != adapter->uc_macs) { 1222 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1231 struct netdev_hw_addr *ha; 1223 struct netdev_hw_addr *ha;
@@ -1251,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)
1251 } 1243 }
1252 1244
1253 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); 1245 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
-
-	/* Set to MCAST promisc mode if setting MULTICAST address fails */
-	if (status) {
-		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
-		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
-		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
-	}
+	if (!status) {
+		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
+		goto done;
+	}
+
+set_mcast_promisc:
+	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+		return;
+
+	/* Set to MCAST promisc mode if setting MULTICAST address fails
+	 * or if num configured exceeds what we support
+	 */
+	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+	if (!status)
+		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
1261done: 1262done:
1262 return; 1263 return;
1263} 1264}
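The rework above makes multicast promiscuity sticky: BE_FLAGS_MCAST_PROMISC records that ALLMULTI is already programmed, so the fallback command is not re-issued on every be_set_rx_mode() call. A rough sketch of the state handling (the flag's bit value is an assumption):

#define BE_FLAGS_MCAST_PROMISC	(1 << 4)	/* assumed bit */

static void rx_mode_sketch(unsigned int *flags, int mc_program_failed)
{
	if (!mc_program_failed) {
		/* exact-match filters took effect; leave promisc mode */
		*flags &= ~BE_FLAGS_MCAST_PROMISC;
		return;
	}
	if (*flags & BE_FLAGS_MCAST_PROMISC)
		return;	/* ALLMULTI already on; nothing to reprogram */
	/* would issue the IFF_ALLMULTI filter command here, then: */
	*flags |= BE_FLAGS_MCAST_PROMISC;
}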
@@ -1287,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1287 1288
1288 if (status) 1289 if (status)
1289 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 1290 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1290 mac, vf); 1291 mac, vf);
1291 else 1292 else
1292 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 1293 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1293 1294
@@ -1295,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1295} 1296}
1296 1297
1297static int be_get_vf_config(struct net_device *netdev, int vf, 1298static int be_get_vf_config(struct net_device *netdev, int vf,
1298 struct ifla_vf_info *vi) 1299 struct ifla_vf_info *vi)
1299{ 1300{
1300 struct be_adapter *adapter = netdev_priv(netdev); 1301 struct be_adapter *adapter = netdev_priv(netdev);
1301 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1302 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1307,7 +1308,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1307 return -EINVAL; 1308 return -EINVAL;
1308 1309
1309 vi->vf = vf; 1310 vi->vf = vf;
-	vi->tx_rate = vf_cfg->tx_rate;
+	vi->max_tx_rate = vf_cfg->tx_rate;
+	vi->min_tx_rate = 0;
1311 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; 1313 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1312 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; 1314 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1313 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1315 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
@@ -1316,8 +1318,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1316 return 0; 1318 return 0;
1317} 1319}
1318 1320
-static int be_set_vf_vlan(struct net_device *netdev,
-			  int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1321{ 1322{
1322 struct be_adapter *adapter = netdev_priv(netdev); 1323 struct be_adapter *adapter = netdev_priv(netdev);
1323 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1324 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,11 +1349,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
1348 return status; 1349 return status;
1349} 1350}
1350 1351
-static int be_set_vf_tx_rate(struct net_device *netdev,
-			     int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
+			     int min_tx_rate, int max_tx_rate)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	int status = 0;
+	struct device *dev = &adapter->pdev->dev;
+	int percent_rate, status = 0;
+	u16 link_speed = 0;
+	u8 link_status;
1356 1360
1357 if (!sriov_enabled(adapter)) 1361 if (!sriov_enabled(adapter))
1358 return -EPERM; 1362 return -EPERM;
@@ -1360,18 +1364,50 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1360 if (vf >= adapter->num_vfs) 1364 if (vf >= adapter->num_vfs)
1361 return -EINVAL; 1365 return -EINVAL;
1362 1366
-	if (rate < 100 || rate > 10000) {
-		dev_err(&adapter->pdev->dev,
-			"tx rate must be between 100 and 10000 Mbps\n");
+	if (min_tx_rate)
 		return -EINVAL;
+
+	if (!max_tx_rate)
+		goto config_qos;
+
+	status = be_cmd_link_status_query(adapter, &link_speed,
+					  &link_status, 0);
+	if (status)
+		goto err;
+
+	if (!link_status) {
+		dev_err(dev, "TX-rate setting not allowed when link is down\n");
+		status = -EPERM;
+		goto err;
+	}
+
+	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
+		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
+			link_speed);
+		status = -EINVAL;
+		goto err;
+	}
+
+	/* On Skyhawk the QOS setting must be done only as a % value */
+	percent_rate = link_speed / 100;
+	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
+		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
+			percent_rate);
+		status = -EINVAL;
+		goto err;
 	}
 
-	status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
+config_qos:
+	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
 	if (status)
-		dev_err(&adapter->pdev->dev,
-			"tx rate %d on VF %d failed\n", rate, vf);
-	else
-		adapter->vf_cfg[vf].tx_rate = rate;
+		goto err;
+
+	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
+	return 0;
+
+err:
+	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
+		max_tx_rate, vf);
 	return status;
1376} 1412}
1377static int be_set_vf_link_state(struct net_device *netdev, int vf, 1413static int be_set_vf_link_state(struct net_device *netdev, int vf,
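be_set_vf_tx_rate() now takes the min/max pair used by .ndo_set_vf_rate and validates max_tx_rate against the live link speed. The validation logic, isolated as a sketch (plain -1 stands in for the kernel error codes):

static int validate_vf_rate(int min_tx_rate, int max_tx_rate,
			    int link_speed, int is_skyhawk)
{
	if (min_tx_rate)
		return -1;		/* a minimum rate is not supported */
	if (!max_tx_rate)
		return 0;		/* 0 means reset to full bandwidth */
	if (max_tx_rate < 100 || max_tx_rate > link_speed)
		return -1;		/* bounded by the current link speed */
	/* On Skyhawk the rate is programmed as a percentage of link speed,
	 * so it must divide evenly; link_speed >= 100 is implied above.
	 */
	if (is_skyhawk && (max_tx_rate % (link_speed / 100)))
		return -1;
	return 0;
}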
@@ -1469,7 +1505,7 @@ modify_eqd:
1469} 1505}
1470 1506
1471static void be_rx_stats_update(struct be_rx_obj *rxo, 1507static void be_rx_stats_update(struct be_rx_obj *rxo,
1472 struct be_rx_compl_info *rxcp) 1508 struct be_rx_compl_info *rxcp)
1473{ 1509{
1474 struct be_rx_stats *stats = rx_stats(rxo); 1510 struct be_rx_stats *stats = rx_stats(rxo);
1475 1511
@@ -1566,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1566 skb_frag_set_page(skb, 0, page_info->page); 1602 skb_frag_set_page(skb, 0, page_info->page);
1567 skb_shinfo(skb)->frags[0].page_offset = 1603 skb_shinfo(skb)->frags[0].page_offset =
1568 page_info->page_offset + hdr_len; 1604 page_info->page_offset + hdr_len;
-	skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+	skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+			  curr_frag_len - hdr_len);
1570 skb->data_len = curr_frag_len - hdr_len; 1607 skb->data_len = curr_frag_len - hdr_len;
1571 skb->truesize += rx_frag_size; 1608 skb->truesize += rx_frag_size;
1572 skb->tail += hdr_len; 1609 skb->tail += hdr_len;
@@ -1725,8 +1762,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1725 if (rxcp->vlanf) { 1762 if (rxcp->vlanf) {
1726 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq, 1763 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1727 compl); 1764 compl);
-		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
-					       compl);
+		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+					       vlan_tag, compl);
1730 } 1767 }
1731 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); 1768 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1732 rxcp->tunneled = 1769 rxcp->tunneled =
@@ -1757,8 +1794,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1757 if (rxcp->vlanf) { 1794 if (rxcp->vlanf) {
1758 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, 1795 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1759 compl); 1796 compl);
-		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
-					       compl);
+		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+					       vlan_tag, compl);
1762 } 1799 }
1763 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); 1800 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1764 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, 1801 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1836,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1799 rxcp->vlan_tag = swab16(rxcp->vlan_tag); 1836 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1800 1837
1801 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && 1838 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
-		    !adapter->vlan_tag[rxcp->vlan_tag])
+		    !test_bit(rxcp->vlan_tag, adapter->vids))
1803 rxcp->vlanf = 0; 1840 rxcp->vlanf = 0;
1804 } 1841 }
1805 1842
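The same bitmap conversion reaches the RX path: a completion's VLAN tag is dropped when it matches the port VLAN id and was never configured by the stack. A sketch with the bitmap test open-coded (VLAN_VID_MASK is 0x0FFF):

#include <stdbool.h>
#include <stdint.h>

#define LONG_BITS (8 * sizeof(unsigned long))

static bool keep_vlan_tag(uint16_t vlan_tag, uint16_t pvid,
			  const unsigned long *vids)
{
	uint16_t vid = vlan_tag & 0x0FFF;
	bool configured = vids[vid / LONG_BITS] & (1UL << (vid % LONG_BITS));

	return !(vid == pvid && !configured);
}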
@@ -1915,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1915} 1952}
1916 1953
1917static u16 be_tx_compl_process(struct be_adapter *adapter, 1954static u16 be_tx_compl_process(struct be_adapter *adapter,
1918 struct be_tx_obj *txo, u16 last_index) 1955 struct be_tx_obj *txo, u16 last_index)
1919{ 1956{
1920 struct be_queue_info *txq = &txo->q; 1957 struct be_queue_info *txq = &txo->q;
1921 struct be_eth_wrb *wrb; 1958 struct be_eth_wrb *wrb;
@@ -2122,7 +2159,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2122 2159
2123 eq = &eqo->q; 2160 eq = &eqo->q;
2124 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, 2161 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2125 sizeof(struct be_eq_entry)); 2162 sizeof(struct be_eq_entry));
2126 if (rc) 2163 if (rc)
2127 return rc; 2164 return rc;
2128 2165
@@ -2155,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
2155 2192
2156 cq = &adapter->mcc_obj.cq; 2193 cq = &adapter->mcc_obj.cq;
2157 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, 2194 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2158 sizeof(struct be_mcc_compl))) 2195 sizeof(struct be_mcc_compl)))
2159 goto err; 2196 goto err;
2160 2197
2161 /* Use the default EQ for MCC completions */ 2198 /* Use the default EQ for MCC completions */
@@ -2275,7 +2312,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
2275 rxo->adapter = adapter; 2312 rxo->adapter = adapter;
2276 cq = &rxo->cq; 2313 cq = &rxo->cq;
2277 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 2314 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2278 sizeof(struct be_eth_rx_compl)); 2315 sizeof(struct be_eth_rx_compl));
2279 if (rc) 2316 if (rc)
2280 return rc; 2317 return rc;
2281 2318
@@ -2339,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
2339} 2376}
2340 2377
2341static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, 2378static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2342 int budget, int polling) 2379 int budget, int polling)
2343{ 2380{
2344 struct be_adapter *adapter = rxo->adapter; 2381 struct be_adapter *adapter = rxo->adapter;
2345 struct be_queue_info *rx_cq = &rxo->cq; 2382 struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2402,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2365 * promiscuous mode on some skews 2402 * promiscuous mode on some skews
2366 */ 2403 */
2367 if (unlikely(rxcp->port != adapter->port_num && 2404 if (unlikely(rxcp->port != adapter->port_num &&
2368 !lancer_chip(adapter))) { 2405 !lancer_chip(adapter))) {
2369 be_rx_compl_discard(rxo, rxcp); 2406 be_rx_compl_discard(rxo, rxcp);
2370 goto loop_continue; 2407 goto loop_continue;
2371 } 2408 }
@@ -2405,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2405 if (!txcp) 2442 if (!txcp)
2406 break; 2443 break;
2407 num_wrbs += be_tx_compl_process(adapter, txo, 2444 num_wrbs += be_tx_compl_process(adapter, txo,
-				AMAP_GET_BITS(struct amap_eth_tx_compl,
-					wrb_index, txcp));
+					       AMAP_GET_BITS(struct
+							     amap_eth_tx_compl,
+							     wrb_index, txcp));
2410 } 2448 }
2411 2449
2412 if (work_done) { 2450 if (work_done) {
@@ -2416,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2416 /* As Tx wrbs have been freed up, wake up netdev queue 2454 /* As Tx wrbs have been freed up, wake up netdev queue
2417 * if it was stopped due to lack of tx wrbs. */ 2455 * if it was stopped due to lack of tx wrbs. */
2418 if (__netif_subqueue_stopped(adapter->netdev, idx) && 2456 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2419 atomic_read(&txo->q.used) < txo->q.len / 2) { 2457 atomic_read(&txo->q.used) < txo->q.len / 2) {
2420 netif_wake_subqueue(adapter->netdev, idx); 2458 netif_wake_subqueue(adapter->netdev, idx);
2421 } 2459 }
2422 2460
@@ -2510,9 +2548,9 @@ void be_detect_error(struct be_adapter *adapter)
2510 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 2548 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2511 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2549 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2512 sliport_err1 = ioread32(adapter->db + 2550 sliport_err1 = ioread32(adapter->db +
2513 SLIPORT_ERROR1_OFFSET); 2551 SLIPORT_ERROR1_OFFSET);
2514 sliport_err2 = ioread32(adapter->db + 2552 sliport_err2 = ioread32(adapter->db +
2515 SLIPORT_ERROR2_OFFSET); 2553 SLIPORT_ERROR2_OFFSET);
2516 adapter->hw_error = true; 2554 adapter->hw_error = true;
2517 /* Do not log error messages if its a FW reset */ 2555 /* Do not log error messages if its a FW reset */
2518 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && 2556 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2569,13 @@ void be_detect_error(struct be_adapter *adapter)
2531 } 2569 }
2532 } else { 2570 } else {
2533 pci_read_config_dword(adapter->pdev, 2571 pci_read_config_dword(adapter->pdev,
2534 PCICFG_UE_STATUS_LOW, &ue_lo); 2572 PCICFG_UE_STATUS_LOW, &ue_lo);
2535 pci_read_config_dword(adapter->pdev, 2573 pci_read_config_dword(adapter->pdev,
2536 PCICFG_UE_STATUS_HIGH, &ue_hi); 2574 PCICFG_UE_STATUS_HIGH, &ue_hi);
2537 pci_read_config_dword(adapter->pdev, 2575 pci_read_config_dword(adapter->pdev,
2538 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2576 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2539 pci_read_config_dword(adapter->pdev, 2577 pci_read_config_dword(adapter->pdev,
2540 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); 2578 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2541 2579
2542 ue_lo = (ue_lo & ~ue_lo_mask); 2580 ue_lo = (ue_lo & ~ue_lo_mask);
2543 ue_hi = (ue_hi & ~ue_hi_mask); 2581 ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2662,7 @@ fail:
2624} 2662}
2625 2663
2626static inline int be_msix_vec_get(struct be_adapter *adapter, 2664static inline int be_msix_vec_get(struct be_adapter *adapter,
2627 struct be_eq_obj *eqo) 2665 struct be_eq_obj *eqo)
2628{ 2666{
2629 return adapter->msix_entries[eqo->msix_idx].vector; 2667 return adapter->msix_entries[eqo->msix_idx].vector;
2630} 2668}
@@ -2648,7 +2686,7 @@ err_msix:
2648 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) 2686 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2649 free_irq(be_msix_vec_get(adapter, eqo), eqo); 2687 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2650 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", 2688 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2651 status); 2689 status);
2652 be_msix_disable(adapter); 2690 be_msix_disable(adapter);
2653 return status; 2691 return status;
2654} 2692}
@@ -2774,7 +2812,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2774{ 2812{
2775 struct be_rx_obj *rxo; 2813 struct be_rx_obj *rxo;
2776 int rc, i, j; 2814 int rc, i, j;
-	u8 rsstable[128];
+	u8 rss_hkey[RSS_HASH_KEY_LEN];
+	struct rss_info *rss = &adapter->rss_info;
2778 2817
2779 for_all_rx_queues(adapter, rxo, i) { 2818 for_all_rx_queues(adapter, rxo, i) {
2780 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, 2819 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2838,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2799 } 2838 }
2800 2839
 	if (be_multi_rxq(adapter)) {
-		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+		for (j = 0; j < RSS_INDIR_TABLE_LEN;
+			j += adapter->num_rx_qs - 1) {
 			for_all_rss_queues(adapter, rxo, i) {
-				if ((j + i) >= 128)
+				if ((j + i) >= RSS_INDIR_TABLE_LEN)
 					break;
-				rsstable[j + i] = rxo->rss_id;
+				rss->rsstable[j + i] = rxo->rss_id;
+				rss->rss_queue[j + i] = i;
 			}
 		}
-		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
-					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
 
 		if (!BEx_chip(adapter))
-			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
-						RSS_ENABLE_UDP_IPV6;
+			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+					  RSS_ENABLE_UDP_IPV6;
 	} else {
 		/* Disable RSS, if only default RX Q is created */
-		adapter->rss_flags = RSS_ENABLE_NONE;
+		rss->rss_flags = RSS_ENABLE_NONE;
 	}
 
-	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
-			       128);
+	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+			       128, rss_hkey);
 	if (rc) {
-		adapter->rss_flags = RSS_ENABLE_NONE;
+		rss->rss_flags = RSS_ENABLE_NONE;
 		return rc;
 	}
 
+	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+
2827 /* First time posting */ 2871 /* First time posting */
2828 for_all_rx_queues(adapter, rxo, i) 2872 for_all_rx_queues(adapter, rxo, i)
2829 be_post_rx_frags(rxo, GFP_KERNEL); 2873 be_post_rx_frags(rxo, GFP_KERNEL);
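The RSS rework above fills the 128-entry indirection table round-robin across the RSS queues and keys the hash with random bytes, remembering both in rss_info for later reporting. The table fill in isolation (queue ids simplified to the loop index):

#define RSS_INDIR_TABLE_LEN	128

static void fill_rsstable(unsigned char *rsstable, int num_rss_qs)
{
	int i, j;

	for (j = 0; j < RSS_INDIR_TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs && j + i < RSS_INDIR_TABLE_LEN; i++)
			rsstable[j + i] = (unsigned char)i;	/* queue id */
}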
@@ -2896,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2896 2940
2897 if (enable) { 2941 if (enable) {
2898 status = pci_write_config_dword(adapter->pdev, 2942 status = pci_write_config_dword(adapter->pdev,
-			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+						PCICFG_PM_CONTROL_OFFSET,
+						PCICFG_PM_CONTROL_MASK);
2900 if (status) { 2945 if (status) {
2901 dev_err(&adapter->pdev->dev, 2946 dev_err(&adapter->pdev->dev,
2902 "Could not enable Wake-on-lan\n"); 2947 "Could not enable Wake-on-lan\n");
@@ -2905,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2905 return status; 2950 return status;
2906 } 2951 }
2907 status = be_cmd_enable_magic_wol(adapter, 2952 status = be_cmd_enable_magic_wol(adapter,
-			adapter->netdev->dev_addr, &cmd);
+						 adapter->netdev->dev_addr,
+						 &cmd);
2909 pci_enable_wake(adapter->pdev, PCI_D3hot, 1); 2955 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2910 pci_enable_wake(adapter->pdev, PCI_D3cold, 1); 2956 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2911 } else { 2957 } else {
@@ -2944,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
2944 2990
2945 if (status) 2991 if (status)
2946 dev_err(&adapter->pdev->dev, 2992 dev_err(&adapter->pdev->dev,
2947 "Mac address assignment failed for VF %d\n", vf); 2993 "Mac address assignment failed for VF %d\n",
2994 vf);
2948 else 2995 else
2949 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 2996 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2950 2997
@@ -3086,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3086 3133
3087 /* If a FW profile exists, then cap_flags are updated */ 3134 /* If a FW profile exists, then cap_flags are updated */
3088 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3135 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
-					BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
-		status = be_cmd_if_create(adapter, cap_flags, en_flags,
-					  &vf_cfg->if_handle, vf + 1);
+					BE_IF_FLAGS_BROADCAST |
+					BE_IF_FLAGS_MULTICAST);
+		status =
+		    be_cmd_if_create(adapter, cap_flags, en_flags,
+				     &vf_cfg->if_handle, vf + 1);
3092 if (status) 3141 if (status)
3093 goto err; 3142 goto err;
3094 } 3143 }
@@ -3119,7 +3168,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3119 struct be_vf_cfg *vf_cfg; 3168 struct be_vf_cfg *vf_cfg;
3120 int status, old_vfs, vf; 3169 int status, old_vfs, vf;
3121 u32 privileges; 3170 u32 privileges;
-	u16 lnk_speed;
3123 3171
3124 old_vfs = pci_num_vf(adapter->pdev); 3172 old_vfs = pci_num_vf(adapter->pdev);
3125 if (old_vfs) { 3173 if (old_vfs) {
@@ -3175,16 +3223,9 @@ static int be_vf_setup(struct be_adapter *adapter)
3175 vf); 3223 vf);
3176 } 3224 }
3177 3225
-		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
-		 * Allow full available bandwidth
-		 */
-		if (BE3_chip(adapter) && !old_vfs)
-			be_cmd_config_qos(adapter, 1000, vf + 1);
-
-		status = be_cmd_link_status_query(adapter, &lnk_speed,
-						  NULL, vf + 1);
-		if (!status)
-			vf_cfg->tx_rate = lnk_speed;
+		/* Allow full available bandwidth */
+		if (!old_vfs)
+			be_cmd_config_qos(adapter, 0, 0, vf + 1);
3188 3229
3189 if (!old_vfs) { 3230 if (!old_vfs) {
3190 be_cmd_enable_vf(adapter, vf + 1); 3231 be_cmd_enable_vf(adapter, vf + 1);
@@ -3590,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)
3590} 3631}
3591#endif 3632#endif
3592 3633
-#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
-
-static bool be_flash_redboot(struct be_adapter *adapter,
-			     const u8 *p, u32 img_start, int image_size,
-			     int hdr_size)
-{
-	u32 crc_offset;
-	u8 flashed_crc[4];
-	int status;
-
-	crc_offset = hdr_size + img_start + image_size - 4;
-
-	p += crc_offset;
-
-	status = be_cmd_get_flash_crc(adapter, flashed_crc,
-				      (image_size - 4));
-	if (status) {
-		dev_err(&adapter->pdev->dev,
-			"could not get crc from flash, not flashing redboot\n");
-		return false;
-	}
-
-	/*update redboot only if crc does not match*/
-	if (!memcmp(flashed_crc, p, 4))
-		return false;
-	else
-		return true;
-}
 
3623static bool phy_flashing_required(struct be_adapter *adapter) 3636static bool phy_flashing_required(struct be_adapter *adapter)
3624{ 3637{
@@ -3649,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
3649} 3662}
3650 3663
3651static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, 3664static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3652 int header_size, 3665 int header_size,
3653 const struct firmware *fw) 3666 const struct firmware *fw)
3654{ 3667{
3655 struct flash_section_info *fsec = NULL; 3668 struct flash_section_info *fsec = NULL;
3656 const u8 *p = fw->data; 3669 const u8 *p = fw->data;
@@ -3665,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3665 return NULL; 3678 return NULL;
3666} 3679}
3667 3680
3681static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3682 u32 img_offset, u32 img_size, int hdr_size,
3683 u16 img_optype, bool *crc_match)
3684{
3685 u32 crc_offset;
3686 int status;
3687 u8 crc[4];
3688
3689 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3690 if (status)
3691 return status;
3692
3693 crc_offset = hdr_size + img_offset + img_size - 4;
3694
3695 /* Skip flashing, if crc of flashed region matches */
3696 if (!memcmp(crc, p + crc_offset, 4))
3697 *crc_match = true;
3698 else
3699 *crc_match = false;
3700
3701 return status;
3702}
3703
3668static int be_flash(struct be_adapter *adapter, const u8 *img, 3704static int be_flash(struct be_adapter *adapter, const u8 *img,
3669 struct be_dma_mem *flash_cmd, int optype, int img_size) 3705 struct be_dma_mem *flash_cmd, int optype, int img_size)
3670{ 3706{
-	u32 total_bytes = 0, flash_op, num_bytes = 0;
-	int status = 0;
 	struct be_cmd_write_flashrom *req = flash_cmd->va;
+	u32 total_bytes, flash_op, num_bytes;
+	int status;
3674 3710
3675 total_bytes = img_size; 3711 total_bytes = img_size;
3676 while (total_bytes) { 3712 while (total_bytes) {
@@ -3693,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
3693 memcpy(req->data_buf, img, num_bytes); 3729 memcpy(req->data_buf, img, num_bytes);
3694 img += num_bytes; 3730 img += num_bytes;
3695 status = be_cmd_write_flashrom(adapter, flash_cmd, optype, 3731 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3696 flash_op, num_bytes); 3732 flash_op, num_bytes);
-		if (status) {
-			if (status == ILLEGAL_IOCTL_REQ &&
-			    optype == OPTYPE_PHY_FW)
-				break;
-			dev_err(&adapter->pdev->dev,
-				"cmd to write to flash rom failed.\n");
+		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
+		    optype == OPTYPE_PHY_FW)
+			break;
+		else if (status)
 			return status;
-		}
3705 } 3738 }
3706 return 0; 3739 return 0;
3707} 3740}
3708 3741
3709/* For BE2, BE3 and BE3-R */ 3742/* For BE2, BE3 and BE3-R */
3710static int be_flash_BEx(struct be_adapter *adapter, 3743static int be_flash_BEx(struct be_adapter *adapter,
3711 const struct firmware *fw, 3744 const struct firmware *fw,
-			struct be_dma_mem *flash_cmd,
-			int num_of_images)
-
+			struct be_dma_mem *flash_cmd, int num_of_images)
 {
-	int status = 0, i, filehdr_size = 0;
 	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
-	const u8 *p = fw->data;
-	const struct flash_comp *pflashcomp;
-	int num_comp, redboot;
+	struct device *dev = &adapter->pdev->dev;
 	struct flash_section_info *fsec = NULL;
+	int status, i, filehdr_size, num_comp;
+	const struct flash_comp *pflashcomp;
+	bool crc_match;
+	const u8 *p;
3722 3754
3723 struct flash_comp gen3_flash_types[] = { 3755 struct flash_comp gen3_flash_types[] = {
3724 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, 3756 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
@@ -3775,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
3775 /* Get flash section info*/ 3807 /* Get flash section info*/
3776 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3808 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3777 if (!fsec) { 3809 if (!fsec) {
-		dev_err(&adapter->pdev->dev,
-			"Invalid Cookie. UFI corrupted ?\n");
+		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3780 return -1; 3811 return -1;
3781 } 3812 }
3782 for (i = 0; i < num_comp; i++) { 3813 for (i = 0; i < num_comp; i++) {
@@ -3792,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,
3792 continue; 3823 continue;
3793 3824
3794 if (pflashcomp[i].optype == OPTYPE_REDBOOT) { 3825 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
-			redboot = be_flash_redboot(adapter, fw->data,
-				pflashcomp[i].offset, pflashcomp[i].size,
-				filehdr_size + img_hdrs_size);
-			if (!redboot)
+			status = be_check_flash_crc(adapter, fw->data,
+						    pflashcomp[i].offset,
+						    pflashcomp[i].size,
+						    filehdr_size +
+						    img_hdrs_size,
+						    OPTYPE_REDBOOT, &crc_match);
+			if (status) {
+				dev_err(dev,
+					"Could not get CRC for 0x%x region\n",
+					pflashcomp[i].optype);
+				continue;
+			}
+
+			if (crc_match)
3799 continue; 3840 continue;
3800 } 3841 }
3801 3842
-		p = fw->data;
-		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
+		p = fw->data + filehdr_size + pflashcomp[i].offset +
+			img_hdrs_size;
3804 if (p + pflashcomp[i].size > fw->data + fw->size) 3845 if (p + pflashcomp[i].size > fw->data + fw->size)
3805 return -1; 3846 return -1;
3806 3847
3807 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, 3848 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3808 pflashcomp[i].size); 3849 pflashcomp[i].size);
3809 if (status) { 3850 if (status) {
-			dev_err(&adapter->pdev->dev,
-				"Flashing section type %d failed.\n",
+			dev_err(dev, "Flashing section type 0x%x failed\n",
3812 pflashcomp[i].img_type); 3852 pflashcomp[i].img_type);
3813 return status; 3853 return status;
3814 } 3854 }
@@ -3816,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,
3816 return 0; 3856 return 0;
3817} 3857}
3818 3858
3859static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3860{
3861 u32 img_type = le32_to_cpu(fsec_entry.type);
3862 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3863
3864 if (img_optype != 0xFFFF)
3865 return img_optype;
3866
3867 switch (img_type) {
3868 case IMAGE_FIRMWARE_iSCSI:
3869 img_optype = OPTYPE_ISCSI_ACTIVE;
3870 break;
3871 case IMAGE_BOOT_CODE:
3872 img_optype = OPTYPE_REDBOOT;
3873 break;
3874 case IMAGE_OPTION_ROM_ISCSI:
3875 img_optype = OPTYPE_BIOS;
3876 break;
3877 case IMAGE_OPTION_ROM_PXE:
3878 img_optype = OPTYPE_PXE_BIOS;
3879 break;
3880 case IMAGE_OPTION_ROM_FCoE:
3881 img_optype = OPTYPE_FCOE_BIOS;
3882 break;
3883 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3884 img_optype = OPTYPE_ISCSI_BACKUP;
3885 break;
3886 case IMAGE_NCSI:
3887 img_optype = OPTYPE_NCSI_FW;
3888 break;
3889 case IMAGE_FLASHISM_JUMPVECTOR:
3890 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3891 break;
3892 case IMAGE_FIRMWARE_PHY:
3893 img_optype = OPTYPE_SH_PHY_FW;
3894 break;
3895 case IMAGE_REDBOOT_DIR:
3896 img_optype = OPTYPE_REDBOOT_DIR;
3897 break;
3898 case IMAGE_REDBOOT_CONFIG:
3899 img_optype = OPTYPE_REDBOOT_CONFIG;
3900 break;
3901 case IMAGE_UFI_DIR:
3902 img_optype = OPTYPE_UFI_DIR;
3903 break;
3904 default:
3905 break;
3906 }
3907
3908 return img_optype;
3909}
3910
3819static int be_flash_skyhawk(struct be_adapter *adapter, 3911static int be_flash_skyhawk(struct be_adapter *adapter,
3820 const struct firmware *fw, 3912 const struct firmware *fw,
3821 struct be_dma_mem *flash_cmd, int num_of_images) 3913 struct be_dma_mem *flash_cmd, int num_of_images)
3822{ 3914{
3823 int status = 0, i, filehdr_size = 0;
3824 int img_offset, img_size, img_optype, redboot;
3825 int img_hdrs_size = num_of_images * sizeof(struct image_hdr); 3915 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3826 const u8 *p = fw->data; 3916 struct device *dev = &adapter->pdev->dev;
3827 struct flash_section_info *fsec = NULL; 3917 struct flash_section_info *fsec = NULL;
3918 u32 img_offset, img_size, img_type;
3919 int status, i, filehdr_size;
3920 bool crc_match, old_fw_img;
3921 u16 img_optype;
3922 const u8 *p;
3828 3923
3829 filehdr_size = sizeof(struct flash_file_hdr_g3); 3924 filehdr_size = sizeof(struct flash_file_hdr_g3);
3830 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3925 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3831 if (!fsec) { 3926 if (!fsec) {
-		dev_err(&adapter->pdev->dev,
-			"Invalid Cookie. UFI corrupted ?\n");
+		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3834 return -1; 3928 return -1;
3835 } 3929 }
3836 3930
3837 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { 3931 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3838 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); 3932 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3839 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); 3933 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
+		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
 
-		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
-		case IMAGE_FIRMWARE_iSCSI:
-			img_optype = OPTYPE_ISCSI_ACTIVE;
-			break;
-		case IMAGE_BOOT_CODE:
-			img_optype = OPTYPE_REDBOOT;
-			break;
-		case IMAGE_OPTION_ROM_ISCSI:
-			img_optype = OPTYPE_BIOS;
-			break;
-		case IMAGE_OPTION_ROM_PXE:
-			img_optype = OPTYPE_PXE_BIOS;
-			break;
-		case IMAGE_OPTION_ROM_FCoE:
-			img_optype = OPTYPE_FCOE_BIOS;
-			break;
-		case IMAGE_FIRMWARE_BACKUP_iSCSI:
-			img_optype = OPTYPE_ISCSI_BACKUP;
-			break;
-		case IMAGE_NCSI:
-			img_optype = OPTYPE_NCSI_FW;
-			break;
-		default:
+		if (img_optype == 0xFFFF)
 			continue;
-		}
+		/* Don't bother verifying CRC if an old FW image is being
+		 * flashed
+		 */
+		if (old_fw_img)
+			goto flash;
+
+		status = be_check_flash_crc(adapter, fw->data, img_offset,
+					    img_size, filehdr_size +
+					    img_hdrs_size, img_optype,
+					    &crc_match);
+		/* The current FW image on the card does not recognize the new
+		 * FLASH op_type. The FW download is partially complete.
+		 * Reboot the server now to enable FW image to recognize the
+		 * new FLASH op_type. To complete the remaining process,
+		 * download the same FW again after the reboot.
+		 */
+		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+			dev_err(dev, "Flash incomplete. Reset the server\n");
+			dev_err(dev, "Download FW image again after reset\n");
+			return -EAGAIN;
+		} else if (status) {
+			dev_err(dev, "Could not get CRC for 0x%x region\n",
+				img_optype);
+			return -EFAULT;
 		}
 
-		if (img_optype == OPTYPE_REDBOOT) {
-			redboot = be_flash_redboot(adapter, fw->data,
-					img_offset, img_size,
-					filehdr_size + img_hdrs_size);
-			if (!redboot)
-				continue;
-		}
+		if (crc_match)
+			continue;
 
-		p = fw->data;
-		p += filehdr_size + img_offset + img_hdrs_size;
+flash:
+		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
 		if (p + img_size > fw->data + fw->size)
 			return -1;
 
 		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
-		if (status) {
-			dev_err(&adapter->pdev->dev,
-				"Flashing section type %d failed.\n",
-				fsec->fsec_entry[i].type);
-			return status;
+		/* For old FW images ignore ILLEGAL_FIELD error or errors on
+		 * UFI_DIR region
+		 */
+		if (old_fw_img &&
+		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
+		     (img_optype == OPTYPE_UFI_DIR &&
+		      base_status(status) == MCC_STATUS_FAILED))) {
+			continue;
+		} else if (status) {
+			dev_err(dev, "Flashing section type 0x%x failed\n",
+				img_type);
+			return -EFAULT;
 		}
3887 } 3989 }
3888 return 0; 3990 return 0;
3889} 3991}
3890 3992
3891static int lancer_fw_download(struct be_adapter *adapter, 3993static int lancer_fw_download(struct be_adapter *adapter,
3892 const struct firmware *fw) 3994 const struct firmware *fw)
3893{ 3995{
3894#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) 3996#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3895#define LANCER_FW_DOWNLOAD_LOCATION "/prg" 3997#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
3955 } 4057 }
3956 4058
3957 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 4059 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3958 flash_cmd.dma); 4060 flash_cmd.dma);
3959 if (status) { 4061 if (status) {
3960 dev_err(&adapter->pdev->dev, 4062 dev_err(&adapter->pdev->dev,
3961 "Firmware load error. " 4063 "Firmware load error. "
@@ -3976,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
3976 goto lancer_fw_exit; 4078 goto lancer_fw_exit;
3977 } 4079 }
3978 } else if (change_status != LANCER_NO_RESET_NEEDED) { 4080 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3979 dev_err(&adapter->pdev->dev, 4081 dev_err(&adapter->pdev->dev,
3980 "System reboot required for new FW" 4082 "System reboot required for new FW to be active\n");
3981 " to be active\n");
3982 } 4083 }
3983 4084
3984 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); 4085 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4042 switch (ufi_type) { 4143 switch (ufi_type) {
4043 case UFI_TYPE4: 4144 case UFI_TYPE4:
4044 status = be_flash_skyhawk(adapter, fw, 4145 status = be_flash_skyhawk(adapter, fw,
4045 &flash_cmd, num_imgs); 4146 &flash_cmd, num_imgs);
4046 break; 4147 break;
4047 case UFI_TYPE3R: 4148 case UFI_TYPE3R:
4048 status = be_flash_BEx(adapter, fw, &flash_cmd, 4149 status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4213,7 @@ fw_exit:
4112 return status; 4213 return status;
4113} 4214}
4114 4215
-static int be_ndo_bridge_setlink(struct net_device *dev,
-				 struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4117{ 4217{
4118 struct be_adapter *adapter = netdev_priv(dev); 4218 struct be_adapter *adapter = netdev_priv(dev);
4119 struct nlattr *attr, *br_spec; 4219 struct nlattr *attr, *br_spec;
@@ -4155,8 +4255,7 @@ err:
4155} 4255}
4156 4256
4157static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4257static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-				 struct net_device *dev,
-				 u32 filter_mask)
+				 struct net_device *dev, u32 filter_mask)
4160{ 4259{
4161 struct be_adapter *adapter = netdev_priv(dev); 4260 struct be_adapter *adapter = netdev_priv(dev);
4162 int status = 0; 4261 int status = 0;
@@ -4254,7 +4353,7 @@ static const struct net_device_ops be_netdev_ops = {
4254 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, 4353 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4255 .ndo_set_vf_mac = be_set_vf_mac, 4354 .ndo_set_vf_mac = be_set_vf_mac,
4256 .ndo_set_vf_vlan = be_set_vf_vlan, 4355 .ndo_set_vf_vlan = be_set_vf_vlan,
-	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
+	.ndo_set_vf_rate	= be_set_vf_tx_rate,
4258 .ndo_get_vf_config = be_get_vf_config, 4357 .ndo_get_vf_config = be_get_vf_config,
4259 .ndo_set_vf_link_state = be_set_vf_link_state, 4358 .ndo_set_vf_link_state = be_set_vf_link_state,
4260#ifdef CONFIG_NET_POLL_CONTROLLER 4359#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4301,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)
4301 4400
4302 netdev->netdev_ops = &be_netdev_ops; 4401 netdev->netdev_ops = &be_netdev_ops;
4303 4402
-	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+	netdev->ethtool_ops = &be_ethtool_ops;
4305} 4404}
4306 4405
4307static void be_unmap_pci_bars(struct be_adapter *adapter) 4406static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4870,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)
4870} 4969}
4871 4970
4872static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, 4971static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4873 pci_channel_state_t state) 4972 pci_channel_state_t state)
4874{ 4973{
4875 struct be_adapter *adapter = pci_get_drvdata(pdev); 4974 struct be_adapter *adapter = pci_get_drvdata(pdev);
4876 struct net_device *netdev = adapter->netdev; 4975 struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7e342b..f3658bdb64cc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
769 return phy_mii_ioctl(phy, ifr, cmd); 769 return phy_mii_ioctl(phy, ifr, cmd);
770} 770}
771 771
-static int ethoc_config(struct net_device *dev, struct ifmap *map)
-{
-	return -ENOSYS;
-}
-
777static void ethoc_do_set_mac_address(struct net_device *dev) 772static void ethoc_do_set_mac_address(struct net_device *dev)
778{ 773{
779 struct ethoc *priv = netdev_priv(dev); 774 struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
995 .ndo_open = ethoc_open, 990 .ndo_open = ethoc_open,
996 .ndo_stop = ethoc_stop, 991 .ndo_stop = ethoc_stop,
997 .ndo_do_ioctl = ethoc_ioctl, 992 .ndo_do_ioctl = ethoc_ioctl,
-	.ndo_set_config		= ethoc_config,
999 .ndo_set_mac_address = ethoc_set_mac_address, 993 .ndo_set_mac_address = ethoc_set_mac_address,
1000 .ndo_set_rx_mode = ethoc_set_multicast_list, 994 .ndo_set_rx_mode = ethoc_set_multicast_list,
1001 .ndo_change_mtu = ethoc_change_mtu, 995 .ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 68069eabc4f8..c77fa4a69844 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
1210 1210
1211 SET_NETDEV_DEV(netdev, &pdev->dev); 1211 SET_NETDEV_DEV(netdev, &pdev->dev);
1212 1212
-	SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+	netdev->ethtool_ops = &ftgmac100_ethtool_ops;
1214 netdev->netdev_ops = &ftgmac100_netdev_ops; 1214 netdev->netdev_ops = &ftgmac100_netdev_ops;
1215 netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; 1215 netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
1216 1216
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8be5b40c0a12..4ff1adc6bfca 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
1085 } 1085 }
1086 1086
1087 SET_NETDEV_DEV(netdev, &pdev->dev); 1087 SET_NETDEV_DEV(netdev, &pdev->dev);
-	SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+	netdev->ethtool_ops = &ftmac100_ethtool_ops;
1089 netdev->netdev_ops = &ftmac100_netdev_ops; 1089 netdev->netdev_ops = &ftmac100_netdev_ops;
1090 1090
1091 platform_set_drvdata(pdev, netdev); 1091 platform_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6048dc8604ee..270308315d43 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -67,6 +67,7 @@ config FSL_XGMAC_MDIO
67 tristate "Freescale XGMAC MDIO" 67 tristate "Freescale XGMAC MDIO"
68 depends on FSL_SOC 68 depends on FSL_SOC
69 select PHYLIB 69 select PHYLIB
70 select OF_MDIO
70 ---help--- 71 ---help---
71 This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 72 This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
72 73
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 3b8d6d19ff05..671d080105a7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -221,7 +221,7 @@ struct bufdesc_ex {
221#define BD_ENET_TX_RCMASK ((ushort)0x003c) 221#define BD_ENET_TX_RCMASK ((ushort)0x003c)
222#define BD_ENET_TX_UN ((ushort)0x0002) 222#define BD_ENET_TX_UN ((ushort)0x0002)
223#define BD_ENET_TX_CSL ((ushort)0x0001) 223#define BD_ENET_TX_CSL ((ushort)0x0001)
-#define BD_ENET_TX_STATS	((ushort)0x03ff)	/* All status bits */
+#define BD_ENET_TX_STATS	((ushort)0x0fff)	/* All status bits */
225 225
226/*enhanced buffer descriptor control/status used by Ethernet transmit*/ 226/*enhanced buffer descriptor control/status used by Ethernet transmit*/
227#define BD_ENET_TX_INT 0x40000000 227#define BD_ENET_TX_INT 0x40000000
@@ -246,8 +246,8 @@ struct bufdesc_ex {
246#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) 246#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
247#define FEC_ENET_TX_FRSIZE 2048 247#define FEC_ENET_TX_FRSIZE 2048
248#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) 248#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE		16	/* Must be power of two */
-#define TX_RING_MOD_MASK	15	/*   for this to work */
+#define TX_RING_SIZE		512	/* Must be power of two */
+#define TX_RING_MOD_MASK	511	/*   for this to work */
251 251
252#define BD_ENET_RX_INT 0x00800000 252#define BD_ENET_RX_INT 0x00800000
253#define BD_ENET_RX_PTP ((ushort)0x0400) 253#define BD_ENET_RX_PTP ((ushort)0x0400)
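TX_RING_SIZE grows from 16 to 512 to absorb software-TSO bursts; the power-of-two requirement exists because the driver wraps ring indices with a mask instead of a modulo:

#define TX_RING_SIZE		512	/* must be a power of two */
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)

static unsigned int next_tx_index(unsigned int index)
{
	return (index + 1) & TX_RING_MOD_MASK;	/* cheap wraparound */
}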
@@ -296,8 +296,15 @@ struct fec_enet_private {
296 /* The ring entries to be free()ed */ 296 /* The ring entries to be free()ed */
297 struct bufdesc *dirty_tx; 297 struct bufdesc *dirty_tx;
298 298
299 unsigned short bufdesc_size;
299 unsigned short tx_ring_size; 300 unsigned short tx_ring_size;
300 unsigned short rx_ring_size; 301 unsigned short rx_ring_size;
302 unsigned short tx_stop_threshold;
303 unsigned short tx_wake_threshold;
304
305 /* Software TSO */
306 char *tso_hdrs;
307 dma_addr_t tso_hdrs_dma;
301 308
302 struct platform_device *pdev; 309 struct platform_device *pdev;
303 310
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8d69e439f0c5..38d9d276ab8b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -36,6 +36,7 @@
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/ip.h> 37#include <linux/ip.h>
38#include <net/ip.h> 38#include <net/ip.h>
39#include <net/tso.h>
39#include <linux/tcp.h> 40#include <linux/tcp.h>
40#include <linux/udp.h> 41#include <linux/udp.h>
41#include <linux/icmp.h> 42#include <linux/icmp.h>
@@ -54,6 +55,7 @@
54#include <linux/of_net.h> 55#include <linux/of_net.h>
55#include <linux/regulator/consumer.h> 56#include <linux/regulator/consumer.h>
56#include <linux/if_vlan.h> 57#include <linux/if_vlan.h>
58#include <linux/pinctrl/consumer.h>
57 59
58#include <asm/cacheflush.h> 60#include <asm/cacheflush.h>
59 61
@@ -172,10 +174,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
172#endif 174#endif
173#endif /* CONFIG_M5272 */ 175#endif /* CONFIG_M5272 */
174 176
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
-#error "FEC: descriptor ring size constants too large"
-#endif
-
179/* Interrupt events/masks. */ 177/* Interrupt events/masks. */
180#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ 178#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
181#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ 179#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
@@ -231,6 +229,15 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
231#define FEC_PAUSE_FLAG_AUTONEG 0x1 229#define FEC_PAUSE_FLAG_AUTONEG 0x1
232#define FEC_PAUSE_FLAG_ENABLE 0x2 230#define FEC_PAUSE_FLAG_ENABLE 0x2
233 231
232#define TSO_HEADER_SIZE 128
233/* Max number of allowed TCP segments for software TSO */
234#define FEC_MAX_TSO_SEGS 100
235#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
236
237#define IS_TSO_HEADER(txq, addr) \
238 ((addr >= txq->tso_hdrs_dma) && \
239 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
240
234static int mii_cnt; 241static int mii_cnt;
235 242
236static inline 243static inline
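IS_TSO_HEADER() exists because TSO headers come from one long-lived DMA region (tso_hdrs_dma) rather than per-packet mappings, so completion handling must range-check each descriptor address before unmapping it. The check, restated with simplified types:

#include <stdbool.h>
#include <stdint.h>

#define TSO_HEADER_SIZE 128

static bool is_tso_header(uint64_t addr, uint64_t tso_hdrs_dma,
			  unsigned int tx_ring_size)
{
	return addr >= tso_hdrs_dma &&
	       addr < tso_hdrs_dma + (uint64_t)tx_ring_size * TSO_HEADER_SIZE;
}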
@@ -286,6 +293,22 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_priva
286 return (new_bd < base) ? (new_bd + ring_size) : new_bd; 293 return (new_bd < base) ? (new_bd + ring_size) : new_bd;
287} 294}
288 295
296static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
297 struct fec_enet_private *fep)
298{
299 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
300}
301
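fec_enet_get_bd_index() replaces the old open-coded bufdesc/bufdesc_ex pointer arithmetic with one division by the runtime descriptor size. A standalone sketch of that computation (the struct layout here is illustrative; only sizeof consistency matters):

#include <stdio.h>

struct bufdesc { unsigned short sc, datlen; unsigned long bufaddr; };

/* index = byte offset from the ring base / per-descriptor size */
static int bd_index(const char *base, const char *bdp,
		    unsigned short bufdesc_size)
{
	return (bdp - base) / bufdesc_size;
}

int main(void)
{
	struct bufdesc ring[8] = { { 0 } };

	/* &ring[5] sits five descriptors past the base */
	printf("%d\n", bd_index((const char *)ring, (const char *)&ring[5],
				sizeof(struct bufdesc)));	/* prints 5 */
	return 0;
}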
302static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
303{
304 int entries;
305
306 entries = ((const char *)fep->dirty_tx -
307 (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
308
309 return entries > 0 ? entries : entries + fep->tx_ring_size;
310}
311
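fec_enet_get_free_txdesc_num() measures the dirty_tx-to-cur_tx distance, keeps one slot in reserve so a full ring is distinguishable from an empty one, and wraps by the ring size when the subtraction goes negative. A standalone sketch with index arithmetic instead of pointers:

#include <stdio.h>

static int free_txdesc(int dirty_tx, int cur_tx, int ring_size)
{
	int entries = dirty_tx - cur_tx - 1;	/* one slot held back */

	return entries > 0 ? entries : entries + ring_size;
}

int main(void)
{
	/* 512-entry ring: reclaim at 4, next fill at 10 -> 505 free */
	printf("%d\n", free_txdesc(4, 10, 512));
	/* empty ring (both pointers equal) -> ring_size - 1 = 511 */
	printf("%d\n", free_txdesc(0, 0, 512));
	return 0;
}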
289static void *swap_buffer(void *bufaddr, int len) 312static void *swap_buffer(void *bufaddr, int len)
290{ 313{
291 int i; 314 int i;
@@ -307,33 +330,133 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
307	if (unlikely(skb_cow_head(skb, 0))) 330	if (unlikely(skb_cow_head(skb, 0)))
308	return -1; 331	return -1;
309 332
333	ip_hdr(skb)->check = 0;
310	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 334	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
311 335
312	return 0; 336	return 0;
313} 337}
314 338
315static netdev_tx_t 339static void
316fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 340fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
341{
342 const struct platform_device_id *id_entry =
343 platform_get_device_id(fep->pdev);
344 struct bufdesc *bdp_pre;
345
346 bdp_pre = fec_enet_get_prevdesc(bdp, fep);
347 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
348 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
349 fep->delay_work.trig_tx = true;
350 schedule_delayed_work(&(fep->delay_work.delay_work),
351 msecs_to_jiffies(1));
352 }
353}
354
355static int
356fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
317{ 357{
318 struct fec_enet_private *fep = netdev_priv(ndev); 358 struct fec_enet_private *fep = netdev_priv(ndev);
319 const struct platform_device_id *id_entry = 359 const struct platform_device_id *id_entry =
320 platform_get_device_id(fep->pdev); 360 platform_get_device_id(fep->pdev);
321 struct bufdesc *bdp, *bdp_pre; 361 struct bufdesc *bdp = fep->cur_tx;
322 void *bufaddr; 362 struct bufdesc_ex *ebdp;
323 unsigned short status; 363 int nr_frags = skb_shinfo(skb)->nr_frags;
364 int frag, frag_len;
365 unsigned short status;
366 unsigned int estatus = 0;
367 skb_frag_t *this_frag;
324 unsigned int index; 368 unsigned int index;
369 void *bufaddr;
370 int i;
325 371
326 /* Fill in a Tx ring entry */ 372 for (frag = 0; frag < nr_frags; frag++) {
373 this_frag = &skb_shinfo(skb)->frags[frag];
374 bdp = fec_enet_get_nextdesc(bdp, fep);
375 ebdp = (struct bufdesc_ex *)bdp;
376
377 status = bdp->cbd_sc;
378 status &= ~BD_ENET_TX_STATS;
379 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
380 frag_len = skb_shinfo(skb)->frags[frag].size;
381
382 /* Handle the last BD specially */
383 if (frag == nr_frags - 1) {
384 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
385 if (fep->bufdesc_ex) {
386 estatus |= BD_ENET_TX_INT;
387 if (unlikely(skb_shinfo(skb)->tx_flags &
388 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
389 estatus |= BD_ENET_TX_TS;
390 }
391 }
392
393 if (fep->bufdesc_ex) {
394 if (skb->ip_summed == CHECKSUM_PARTIAL)
395 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
396 ebdp->cbd_bdu = 0;
397 ebdp->cbd_esc = estatus;
398 }
399
400 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
401
402 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
403 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
404 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
405 memcpy(fep->tx_bounce[index], bufaddr, frag_len);
406 bufaddr = fep->tx_bounce[index];
407
408 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
409 swap_buffer(bufaddr, frag_len);
410 }
411
412 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
413 frag_len, DMA_TO_DEVICE);
414 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
415 dev_kfree_skb_any(skb);
416 if (net_ratelimit())
417 netdev_err(ndev, "Tx DMA memory map failed\n");
418 goto dma_mapping_error;
419 }
420
421 bdp->cbd_datlen = frag_len;
422 bdp->cbd_sc = status;
423 }
424
425 fep->cur_tx = bdp;
426
427 return 0;
428
429dma_mapping_error:
327 bdp = fep->cur_tx; 430 bdp = fep->cur_tx;
431 for (i = 0; i < frag; i++) {
432 bdp = fec_enet_get_nextdesc(bdp, fep);
433 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
434 bdp->cbd_datlen, DMA_TO_DEVICE);
435 }
436 return NETDEV_TX_OK;
437}
328 438
329 status = bdp->cbd_sc; 439static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
440{
441 struct fec_enet_private *fep = netdev_priv(ndev);
442 const struct platform_device_id *id_entry =
443 platform_get_device_id(fep->pdev);
444 int nr_frags = skb_shinfo(skb)->nr_frags;
445 struct bufdesc *bdp, *last_bdp;
446 void *bufaddr;
447 unsigned short status;
448 unsigned short buflen;
449 unsigned int estatus = 0;
450 unsigned int index;
451 int entries_free;
452 int ret;
330 453
331 if (status & BD_ENET_TX_READY) { 454 entries_free = fec_enet_get_free_txdesc_num(fep);
332 /* Ooops. All transmit buffers are full. Bail out. 455 if (entries_free < MAX_SKB_FRAGS + 1) {
333 * This should not happen, since ndev->tbusy should be set. 456 dev_kfree_skb_any(skb);
334 */ 457 if (net_ratelimit())
335 netdev_err(ndev, "tx queue full!\n"); 458 netdev_err(ndev, "NOT enough BD for SG!\n");
336 return NETDEV_TX_BUSY; 459 return NETDEV_TX_OK;
337 } 460 }
338 461
339 /* Protocol checksum off-load for TCP and UDP. */ 462 /* Protocol checksum off-load for TCP and UDP. */
@@ -342,102 +465,300 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
342 return NETDEV_TX_OK; 465 return NETDEV_TX_OK;
343 } 466 }
344 467
345 /* Clear all of the status flags */ 468 /* Fill in a Tx ring entry */
469 bdp = fep->cur_tx;
470 status = bdp->cbd_sc;
346 status &= ~BD_ENET_TX_STATS; 471 status &= ~BD_ENET_TX_STATS;
347 472
348 /* Set buffer length and buffer pointer */ 473 /* Set buffer length and buffer pointer */
349 bufaddr = skb->data; 474 bufaddr = skb->data;
350 bdp->cbd_datlen = skb->len; 475 buflen = skb_headlen(skb);
351
352 /*
353 * On some FEC implementations data must be aligned on
354 * 4-byte boundaries. Use bounce buffers to copy data
355 * and get it aligned. Ugh.
356 */
357 if (fep->bufdesc_ex)
358 index = (struct bufdesc_ex *)bdp -
359 (struct bufdesc_ex *)fep->tx_bd_base;
360 else
361 index = bdp - fep->tx_bd_base;
362 476
363 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 477 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
364 memcpy(fep->tx_bounce[index], skb->data, skb->len); 478 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
479 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
480 memcpy(fep->tx_bounce[index], skb->data, buflen);
365 bufaddr = fep->tx_bounce[index]; 481 bufaddr = fep->tx_bounce[index];
366 }
367 482
368 /* 483 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
369 * Some design made an incorrect assumption on endian mode of 484 swap_buffer(bufaddr, buflen);
370 * the system that it's running on. As the result, driver has to 485 }
371 * swap every frame going to and coming from the controller.
372 */
373 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
374 swap_buffer(bufaddr, skb->len);
375
376 /* Save skb pointer */
377 fep->tx_skbuff[index] = skb;
378 486
379 /* Push the data cache so the CPM does not get stale memory 487 /* Push the data cache so the CPM does not get stale memory
380 * data. 488 * data.
381 */ 489 */
382 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, 490 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
383 skb->len, DMA_TO_DEVICE); 491 buflen, DMA_TO_DEVICE);
384 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { 492 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
385 bdp->cbd_bufaddr = 0;
386 fep->tx_skbuff[index] = NULL;
387 dev_kfree_skb_any(skb); 493 dev_kfree_skb_any(skb);
388 if (net_ratelimit()) 494 if (net_ratelimit())
389 netdev_err(ndev, "Tx DMA memory map failed\n"); 495 netdev_err(ndev, "Tx DMA memory map failed\n");
390 return NETDEV_TX_OK; 496 return NETDEV_TX_OK;
391 } 497 }
392 498
499 if (nr_frags) {
500 ret = fec_enet_txq_submit_frag_skb(skb, ndev);
501 if (ret)
502 return ret;
503 } else {
504 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
505 if (fep->bufdesc_ex) {
506 estatus = BD_ENET_TX_INT;
507 if (unlikely(skb_shinfo(skb)->tx_flags &
508 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
509 estatus |= BD_ENET_TX_TS;
510 }
511 }
512
393 if (fep->bufdesc_ex) { 513 if (fep->bufdesc_ex) {
394 514
395 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 515 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
396 ebdp->cbd_bdu = 0; 516
397 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 517 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
398 fep->hwts_tx_en)) { 518 fep->hwts_tx_en))
399 ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
400 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 519 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
401 } else {
402 ebdp->cbd_esc = BD_ENET_TX_INT;
403 520
404 /* Enable protocol checksum flags 521 if (skb->ip_summed == CHECKSUM_PARTIAL)
405 * We do not bother with the IP Checksum bits as they 522 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
406 * are done by the kernel 523
407 */ 524 ebdp->cbd_bdu = 0;
408 if (skb->ip_summed == CHECKSUM_PARTIAL) 525 ebdp->cbd_esc = estatus;
409 ebdp->cbd_esc |= BD_ENET_TX_PINS;
410 }
411 } 526 }
412 527
528 last_bdp = fep->cur_tx;
529 index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
530 /* Save skb pointer */
531 fep->tx_skbuff[index] = skb;
532
533 bdp->cbd_datlen = buflen;
534
413 /* Send it on its way. Tell FEC it's ready, interrupt when done, 535 /* Send it on its way. Tell FEC it's ready, interrupt when done,
414 * it's the last BD of the frame, and to put the CRC on the end. 536 * it's the last BD of the frame, and to put the CRC on the end.
415 */ 537 */
416 status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR 538 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
417 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
418 bdp->cbd_sc = status; 539 bdp->cbd_sc = status;
419 540
420 bdp_pre = fec_enet_get_prevdesc(bdp, fep); 541 fec_enet_submit_work(bdp, fep);
421 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
422 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
423 fep->delay_work.trig_tx = true;
424 schedule_delayed_work(&(fep->delay_work.delay_work),
425 msecs_to_jiffies(1));
426 }
427 542
428 /* If this was the last BD in the ring, start at the beginning again. */ 543 /* If this was the last BD in the ring, start at the beginning again. */
429 bdp = fec_enet_get_nextdesc(bdp, fep); 544 bdp = fec_enet_get_nextdesc(last_bdp, fep);
430 545
431 skb_tx_timestamp(skb); 546 skb_tx_timestamp(skb);
432 547
433 fep->cur_tx = bdp; 548 fep->cur_tx = bdp;
434 549
435 if (fep->cur_tx == fep->dirty_tx) 550 /* Trigger transmission start */
436 netif_stop_queue(ndev); 551 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
552
553 return 0;
554}
555
556static int
557fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
558 struct bufdesc *bdp, int index, char *data,
559 int size, bool last_tcp, bool is_last)
560{
561 struct fec_enet_private *fep = netdev_priv(ndev);
562 const struct platform_device_id *id_entry =
563 platform_get_device_id(fep->pdev);
564 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
565 unsigned short status;
566 unsigned int estatus = 0;
567
568 status = bdp->cbd_sc;
569 status &= ~BD_ENET_TX_STATS;
570
571 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
572 bdp->cbd_datlen = size;
573
574 if (((unsigned long) data) & FEC_ALIGNMENT ||
575 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
576 memcpy(fep->tx_bounce[index], data, size);
577 data = fep->tx_bounce[index];
578
579 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
580 swap_buffer(data, size);
581 }
582
583 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
584 size, DMA_TO_DEVICE);
585 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
586 dev_kfree_skb_any(skb);
587 if (net_ratelimit())
588 netdev_err(ndev, "Tx DMA memory map failed\n");
589 return NETDEV_TX_BUSY;
590 }
591
592 if (fep->bufdesc_ex) {
593 if (skb->ip_summed == CHECKSUM_PARTIAL)
594 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
595 ebdp->cbd_bdu = 0;
596 ebdp->cbd_esc = estatus;
597 }
598
599 /* Handle the last BD specially */
600 if (last_tcp)
601 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
602 if (is_last) {
603 status |= BD_ENET_TX_INTR;
604 if (fep->bufdesc_ex)
605 ebdp->cbd_esc |= BD_ENET_TX_INT;
606 }
607
608 bdp->cbd_sc = status;
609
610 return 0;
611}
612
613static int
614fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
615 struct bufdesc *bdp, int index)
616{
617 struct fec_enet_private *fep = netdev_priv(ndev);
618 const struct platform_device_id *id_entry =
619 platform_get_device_id(fep->pdev);
620 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
621 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
622 void *bufaddr;
623 unsigned long dmabuf;
624 unsigned short status;
625 unsigned int estatus = 0;
626
627 status = bdp->cbd_sc;
628 status &= ~BD_ENET_TX_STATS;
629 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
630
631 bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
632 dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
633 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
634 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
635 memcpy(fep->tx_bounce[index], skb->data, hdr_len);
636 bufaddr = fep->tx_bounce[index];
637
638 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
639 swap_buffer(bufaddr, hdr_len);
640
641 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
642 hdr_len, DMA_TO_DEVICE);
643 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
644 dev_kfree_skb_any(skb);
645 if (net_ratelimit())
646 netdev_err(ndev, "Tx DMA memory map failed\n");
647 return NETDEV_TX_BUSY;
648 }
649 }
650
651 bdp->cbd_bufaddr = dmabuf;
652 bdp->cbd_datlen = hdr_len;
653
654 if (fep->bufdesc_ex) {
655 if (skb->ip_summed == CHECKSUM_PARTIAL)
656 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
657 ebdp->cbd_bdu = 0;
658 ebdp->cbd_esc = estatus;
659 }
660
661 bdp->cbd_sc = status;
662
663 return 0;
664}
665
666static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
667{
668 struct fec_enet_private *fep = netdev_priv(ndev);
669 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
670 int total_len, data_left;
671 struct bufdesc *bdp = fep->cur_tx;
672 struct tso_t tso;
673 unsigned int index = 0;
674 int ret;
675
676 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
677 dev_kfree_skb_any(skb);
678 if (net_ratelimit())
679 netdev_err(ndev, "NOT enough BD for TSO!\n");
680 return NETDEV_TX_OK;
681 }
682
683 /* Protocol checksum off-load for TCP and UDP. */
684 if (fec_enet_clear_csum(skb, ndev)) {
685 dev_kfree_skb_any(skb);
686 return NETDEV_TX_OK;
687 }
688
689 /* Initialize the TSO handler, and prepare the first payload */
690 tso_start(skb, &tso);
691
692 total_len = skb->len - hdr_len;
693 while (total_len > 0) {
694 char *hdr;
695
696 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
697 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
698 total_len -= data_left;
699
700 /* prepare packet headers: MAC + IP + TCP */
701 hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
702 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
703 ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
704 if (ret)
705 goto err_release;
706
707 while (data_left > 0) {
708 int size;
709
710 size = min_t(int, tso.size, data_left);
711 bdp = fec_enet_get_nextdesc(bdp, fep);
712 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
713 ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
714 size, size == data_left,
715 total_len == 0);
716 if (ret)
717 goto err_release;
718
719 data_left -= size;
720 tso_build_data(skb, &tso, size);
721 }
722
723 bdp = fec_enet_get_nextdesc(bdp, fep);
724 }
725
726 /* Save skb pointer */
727 fep->tx_skbuff[index] = skb;
728
729 fec_enet_submit_work(bdp, fep);
730
731 skb_tx_timestamp(skb);
732 fep->cur_tx = bdp;
437 733
438 /* Trigger transmission start */ 734 /* Trigger transmission start */
439 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 735 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
440 736
737 return 0;
738
739err_release:
740 /* TODO: Release all used data descriptors for TSO */
741 return ret;
742}
743
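fec_enet_txq_submit_tso() above carves the payload into gso_size-sized segments, each preceded by a rebuilt MAC+IP+TCP header placed in its own descriptor via the net/tso.h helpers. A userspace model of just the outer segmentation accounting (sizes are made up):

#include <stdio.h>

int main(void)
{
	int skb_len = 10000, hdr_len = 54, gso_size = 1448;
	int total_len = skb_len - hdr_len;
	int segs = 0;

	while (total_len > 0) {
		int data_left = total_len < gso_size ? total_len : gso_size;

		total_len -= data_left;
		segs++;		/* one header BD plus >= 1 data BDs each */
		printf("segment %d: %d payload bytes%s\n", segs, data_left,
		       total_len == 0 ? " (last)" : "");
	}
	/* 9946 payload bytes -> six 1448-byte segments plus one of 1258 */
	return 0;
}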
744static netdev_tx_t
745fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
746{
747 struct fec_enet_private *fep = netdev_priv(ndev);
748 int entries_free;
749 int ret;
750
751 if (skb_is_gso(skb))
752 ret = fec_enet_txq_submit_tso(skb, ndev);
753 else
754 ret = fec_enet_txq_submit_skb(skb, ndev);
755 if (ret)
756 return ret;
757
758 entries_free = fec_enet_get_free_txdesc_num(fep);
759 if (entries_free <= fep->tx_stop_threshold)
760 netif_stop_queue(ndev);
761
441 return NETDEV_TX_OK; 762 return NETDEV_TX_OK;
442} 763}
443 764
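The rewritten fec_enet_start_xmit() stops the queue while there are still tx_stop_threshold (FEC_MAX_SKB_DESCS) free descriptors, so even a worst-case TSO skb queued next cannot overrun the ring; reclaim wakes it again only past the larger tx_wake_threshold. A standalone sketch of that hysteresis, assuming MAX_SKB_FRAGS is 17 as on 4K-page builds:

#include <stdbool.h>
#include <stdio.h>

#define RING		512
#define MAX_TSO_SEGS	100
#define MAX_SKB_FRAGS	17
#define STOP_TH		(MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)	/* 217 */
#define WAKE_TH		((RING - STOP_TH) / 2)			/* 147 */

int main(void)
{
	int free = RING - 1;	/* empty ring keeps one slot in reserve */
	bool stopped = false;

	free -= 300;			/* xmit consumes descriptors     */
	if (free <= STOP_TH)
		stopped = true;		/* 211 <= 217 -> stop the queue  */

	free += 40;			/* reclaim returns some          */
	if (stopped && free >= WAKE_TH)
		stopped = false;	/* 251 >= 147 -> wake the queue  */

	printf("stopped=%d free=%d\n", stopped, free);
	return 0;
}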
@@ -756,6 +1077,7 @@ fec_enet_tx(struct net_device *ndev)
756 unsigned short status; 1077 unsigned short status;
757 struct sk_buff *skb; 1078 struct sk_buff *skb;
758 int index = 0; 1079 int index = 0;
1080 int entries_free;
759 1081
760	fep = netdev_priv(ndev); 1082	fep = netdev_priv(ndev);
761	bdp = fep->dirty_tx; 1083	bdp = fep->dirty_tx;
@@ -769,16 +1091,17 @@ fec_enet_tx(struct net_device *ndev)
769 if (bdp == fep->cur_tx) 1091 if (bdp == fep->cur_tx)
770 break; 1092 break;
771 1093
772 if (fep->bufdesc_ex) 1094 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
773 index = (struct bufdesc_ex *)bdp -
774 (struct bufdesc_ex *)fep->tx_bd_base;
775 else
776 index = bdp - fep->tx_bd_base;
777 1095
778 skb = fep->tx_skbuff[index]; 1096 skb = fep->tx_skbuff[index];
779 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, 1097 if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
780 DMA_TO_DEVICE); 1098 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1099 bdp->cbd_datlen, DMA_TO_DEVICE);
781 bdp->cbd_bufaddr = 0; 1100 bdp->cbd_bufaddr = 0;
1101 if (!skb) {
1102 bdp = fec_enet_get_nextdesc(bdp, fep);
1103 continue;
1104 }
782 1105
783 /* Check for errors. */ 1106 /* Check for errors. */
784 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1107 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -797,7 +1120,7 @@ fec_enet_tx(struct net_device *ndev)
797 ndev->stats.tx_carrier_errors++; 1120 ndev->stats.tx_carrier_errors++;
798 } else { 1121 } else {
799 ndev->stats.tx_packets++; 1122 ndev->stats.tx_packets++;
800 ndev->stats.tx_bytes += bdp->cbd_datlen; 1123 ndev->stats.tx_bytes += skb->len;
801 } 1124 }
802 1125
803 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 1126 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -834,15 +1157,15 @@ fec_enet_tx(struct net_device *ndev)
834 1157
835 /* Since we have freed up a buffer, the ring is no longer full 1158 /* Since we have freed up a buffer, the ring is no longer full
836 */ 1159 */
837 if (fep->dirty_tx != fep->cur_tx) { 1160 if (netif_queue_stopped(ndev)) {
838 if (netif_queue_stopped(ndev)) 1161 entries_free = fec_enet_get_free_txdesc_num(fep);
1162 if (entries_free >= fep->tx_wake_threshold)
839 netif_wake_queue(ndev); 1163 netif_wake_queue(ndev);
840 } 1164 }
841 } 1165 }
842 return; 1166 return;
843} 1167}
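With software TSO one skb now spans several descriptors: the skb pointer lives only in the final slot, header slots point into the coherent tso_hdrs block and are never unmapped, and byte accounting switches from per-descriptor cbd_datlen to skb->len so the packet is counted once. A userspace model of that reclaim logic (types and sizes illustrative):

#include <stdio.h>

struct slot { void *skb; int is_tso_hdr; };

int main(void)
{
	char the_skb[1];			/* stands in for the sk_buff */
	int skb_len = 3024;
	struct slot ring[3] = {
		{ NULL, 1 },	/* TSO header slot: skip dma_unmap      */
		{ NULL, 0 },	/* TSO data slot: unmap only, no skb    */
		{ the_skb, 0 },	/* last BD of the frame: owns the skb   */
	};
	long tx_bytes = 0;

	for (int i = 0; i < 3; i++) {
		if (!ring[i].is_tso_hdr) {
			/* real driver: dma_unmap_single(cbd_bufaddr,
			 * cbd_datlen) happens here */
		}
		if (!ring[i].skb)
			continue;	/* intermediate TSO descriptor  */
		tx_bytes += skb_len;	/* whole-skb accounting         */
	}
	printf("tx_bytes=%ld\n", tx_bytes);	/* 3024, counted once   */
	return 0;
}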
844 1168
845
846/* During a receive, the cur_rx points to the current incoming buffer. 1169/* During a receive, the cur_rx points to the current incoming buffer.
847 * When we update through the ring, if the next incoming buffer has 1170 * When we update through the ring, if the next incoming buffer has
848 * not been given to the system, we just set the empty indicator, 1171 * not been given to the system, we just set the empty indicator,
@@ -920,11 +1243,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
920 pkt_len = bdp->cbd_datlen; 1243 pkt_len = bdp->cbd_datlen;
921 ndev->stats.rx_bytes += pkt_len; 1244 ndev->stats.rx_bytes += pkt_len;
922 1245
923 if (fep->bufdesc_ex) 1246 index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
924 index = (struct bufdesc_ex *)bdp -
925 (struct bufdesc_ex *)fep->rx_bd_base;
926 else
927 index = bdp - fep->rx_bd_base;
928 data = fep->rx_skbuff[index]->data; 1247 data = fep->rx_skbuff[index]->data;
929 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 1248 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
930 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1249 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1255,6 +1574,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1255 return 0; 1574 return 0;
1256} 1575}
1257 1576
1577static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1578{
1579 struct fec_enet_private *fep = netdev_priv(ndev);
1580 int ret;
1581
1582 if (enable) {
1583 ret = clk_prepare_enable(fep->clk_ahb);
1584 if (ret)
1585 return ret;
1586 ret = clk_prepare_enable(fep->clk_ipg);
1587 if (ret)
1588 goto failed_clk_ipg;
1589 if (fep->clk_enet_out) {
1590 ret = clk_prepare_enable(fep->clk_enet_out);
1591 if (ret)
1592 goto failed_clk_enet_out;
1593 }
1594 if (fep->clk_ptp) {
1595 ret = clk_prepare_enable(fep->clk_ptp);
1596 if (ret)
1597 goto failed_clk_ptp;
1598 }
1599 } else {
1600 clk_disable_unprepare(fep->clk_ahb);
1601 clk_disable_unprepare(fep->clk_ipg);
1602 if (fep->clk_enet_out)
1603 clk_disable_unprepare(fep->clk_enet_out);
1604 if (fep->clk_ptp)
1605 clk_disable_unprepare(fep->clk_ptp);
1606 }
1607
1608 return 0;
1609failed_clk_ptp:
1610 if (fep->clk_enet_out)
1611 clk_disable_unprepare(fep->clk_enet_out);
1612failed_clk_enet_out:
1613 clk_disable_unprepare(fep->clk_ipg);
1614failed_clk_ipg:
1615 clk_disable_unprepare(fep->clk_ahb);
1616
1617 return ret;
1618}
1619
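fec_enet_clk_enable() consolidates four open-coded clk_prepare_enable() sequences behind one helper with backward goto unwinding; the later hunks in probe, open/close, and suspend/resume collapse into single calls to it. A standalone sketch of the unwind pattern itself, reduced to three clocks for brevity (names and the fault injection are illustrative, not kernel APIs):

#include <stdio.h>

static int enable(const char *name, int fail)
{
	if (fail) {
		printf("enable %s: fail\n", name);
		return -1;
	}
	printf("enable %s\n", name);
	return 0;
}

static void disable(const char *name) { printf("disable %s\n", name); }

static int clk_bundle_enable(int fail_ptp)
{
	int ret;

	ret = enable("ahb", 0);
	if (ret)
		return ret;
	ret = enable("ipg", 0);
	if (ret)
		goto failed_ipg;
	ret = enable("ptp", fail_ptp);
	if (ret)
		goto failed_ptp;
	return 0;

failed_ptp:			/* release in reverse order of acquisition */
	disable("ipg");
failed_ipg:
	disable("ahb");
	return ret;
}

int main(void)
{
	return clk_bundle_enable(1) ? 1 : 0;	/* unwinds ipg, then ahb */
}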
1258static int fec_enet_mii_probe(struct net_device *ndev) 1620static int fec_enet_mii_probe(struct net_device *ndev)
1259{ 1621{
1260 struct fec_enet_private *fep = netdev_priv(ndev); 1622 struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1364,7 +1726,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1364 * Reference Manual has an error on this, and gets fixed on i.MX6Q 1726 * Reference Manual has an error on this, and gets fixed on i.MX6Q
1365 * document. 1727 * document.
1366 */ 1728 */
1367 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000); 1729 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
1368 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) 1730 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1369 fep->phy_speed--; 1731 fep->phy_speed--;
1370 fep->phy_speed <<= 1; 1732 fep->phy_speed <<= 1;
@@ -1773,6 +2135,11 @@ fec_enet_open(struct net_device *ndev)
1773 struct fec_enet_private *fep = netdev_priv(ndev); 2135 struct fec_enet_private *fep = netdev_priv(ndev);
1774 int ret; 2136 int ret;
1775 2137
2138 pinctrl_pm_select_default_state(&fep->pdev->dev);
2139 ret = fec_enet_clk_enable(ndev, true);
2140 if (ret)
2141 return ret;
2142
1776 /* I should reset the ring buffers here, but I don't yet know 2143 /* I should reset the ring buffers here, but I don't yet know
1777 * a simple way to do that. 2144 * a simple way to do that.
1778 */ 2145 */
@@ -1811,6 +2178,8 @@ fec_enet_close(struct net_device *ndev)
1811 phy_disconnect(fep->phy_dev); 2178 phy_disconnect(fep->phy_dev);
1812 } 2179 }
1813 2180
2181 fec_enet_clk_enable(ndev, false);
2182 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
1814 fec_enet_free_buffers(ndev); 2183 fec_enet_free_buffers(ndev);
1815 2184
1816 return 0; 2185 return 0;
@@ -1988,13 +2357,35 @@ static int fec_enet_init(struct net_device *ndev)
1988 const struct platform_device_id *id_entry = 2357 const struct platform_device_id *id_entry =
1989 platform_get_device_id(fep->pdev); 2358 platform_get_device_id(fep->pdev);
1990 struct bufdesc *cbd_base; 2359 struct bufdesc *cbd_base;
2360 int bd_size;
2361
2362 /* init the tx & rx ring size */
2363 fep->tx_ring_size = TX_RING_SIZE;
2364 fep->rx_ring_size = RX_RING_SIZE;
2365
2366 fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2367 fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
2368
2369 if (fep->bufdesc_ex)
2370 fep->bufdesc_size = sizeof(struct bufdesc_ex);
2371 else
2372 fep->bufdesc_size = sizeof(struct bufdesc);
2373 bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
2374 fep->bufdesc_size;
1991 2375
1992 /* Allocate memory for buffer descriptors. */ 2376 /* Allocate memory for buffer descriptors. */
1993 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, 2377 cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
1994 GFP_KERNEL); 2378 GFP_KERNEL);
1995 if (!cbd_base) 2379 if (!cbd_base)
1996 return -ENOMEM; 2380 return -ENOMEM;
1997 2381
2382 fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
2383 &fep->tso_hdrs_dma, GFP_KERNEL);
2384 if (!fep->tso_hdrs) {
2385 dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
2386 return -ENOMEM;
2387 }
2388
1998 memset(cbd_base, 0, PAGE_SIZE); 2389 memset(cbd_base, 0, PAGE_SIZE);
1999 2390
2000 fep->netdev = ndev; 2391 fep->netdev = ndev;
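Descriptor memory is now sized from the ring parameters instead of a fixed PAGE_SIZE, and a separate coherent block holds one 128-byte TSO header per TX descriptor. A standalone check of the resulting sizes, assuming extended descriptors of 32 bytes (the assumed sizeof(struct bufdesc_ex)) and the defaults set above:

#include <stdio.h>

int main(void)
{
	int tx = 512, rx = 512;
	int bufdesc_ex_size = 32;	/* assumed sizeof(struct bufdesc_ex) */
	int tso_header_size = 128;

	printf("bd_size  = %d bytes\n", (tx + rx) * bufdesc_ex_size); /* 32768 */
	printf("tso_hdrs = %d bytes\n", tx * tso_header_size);        /* 65536 */
	return 0;
}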
@@ -2004,10 +2395,6 @@ static int fec_enet_init(struct net_device *ndev)
2004 /* make sure MAC we just acquired is programmed into the hw */ 2395 /* make sure MAC we just acquired is programmed into the hw */
2005 fec_set_mac_address(ndev, NULL); 2396 fec_set_mac_address(ndev, NULL);
2006 2397
2007 /* init the tx & rx ring size */
2008 fep->tx_ring_size = TX_RING_SIZE;
2009 fep->rx_ring_size = RX_RING_SIZE;
2010
2011 /* Set receive and transmit descriptor base. */ 2398 /* Set receive and transmit descriptor base. */
2012 fep->rx_bd_base = cbd_base; 2399 fep->rx_bd_base = cbd_base;
2013 if (fep->bufdesc_ex) 2400 if (fep->bufdesc_ex)
@@ -2024,21 +2411,21 @@ static int fec_enet_init(struct net_device *ndev)
2024 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 2411 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
2025 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 2412 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
2026 2413
2027 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) { 2414 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
2028 /* enable hw VLAN support */ 2415 /* enable hw VLAN support */
2029 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 2416 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2030 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
2031 }
2032 2417
2033 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { 2418 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
2419 ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
2420
2034 /* enable hw accelerator */ 2421 /* enable hw accelerator */
2035 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2422 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2036 | NETIF_F_RXCSUM); 2423 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
2037 ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2038 | NETIF_F_RXCSUM);
2039 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 2424 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2040 } 2425 }
2041 2426
2427 ndev->hw_features = ndev->features;
2428
2042 fec_restart(ndev, 0); 2429 fec_restart(ndev, 0);
2043 2430
2044 return 0; 2431 return 0;
@@ -2114,6 +2501,9 @@ fec_probe(struct platform_device *pdev)
2114 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 2501 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
2115#endif 2502#endif
2116 2503
2504 /* Select default pin state */
2505 pinctrl_pm_select_default_state(&pdev->dev);
2506
2117 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2507 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2118 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 2508 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2119 if (IS_ERR(fep->hwp)) { 2509 if (IS_ERR(fep->hwp)) {
@@ -2164,26 +2554,10 @@ fec_probe(struct platform_device *pdev)
2164 fep->bufdesc_ex = 0; 2554 fep->bufdesc_ex = 0;
2165 } 2555 }
2166 2556
2167 ret = clk_prepare_enable(fep->clk_ahb); 2557 ret = fec_enet_clk_enable(ndev, true);
2168 if (ret) 2558 if (ret)
2169 goto failed_clk; 2559 goto failed_clk;
2170 2560
2171 ret = clk_prepare_enable(fep->clk_ipg);
2172 if (ret)
2173 goto failed_clk_ipg;
2174
2175 if (fep->clk_enet_out) {
2176 ret = clk_prepare_enable(fep->clk_enet_out);
2177 if (ret)
2178 goto failed_clk_enet_out;
2179 }
2180
2181 if (fep->clk_ptp) {
2182 ret = clk_prepare_enable(fep->clk_ptp);
2183 if (ret)
2184 goto failed_clk_ptp;
2185 }
2186
2187 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 2561 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2188 if (!IS_ERR(fep->reg_phy)) { 2562 if (!IS_ERR(fep->reg_phy)) {
2189 ret = regulator_enable(fep->reg_phy); 2563 ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2599,8 @@ fec_probe(struct platform_device *pdev)
2225 2599
2226 /* Carrier starts down, phylib will bring it up */ 2600 /* Carrier starts down, phylib will bring it up */
2227 netif_carrier_off(ndev); 2601 netif_carrier_off(ndev);
2602 fec_enet_clk_enable(ndev, false);
2603 pinctrl_pm_select_sleep_state(&pdev->dev);
2228 2604
2229 ret = register_netdev(ndev); 2605 ret = register_netdev(ndev);
2230 if (ret) 2606 if (ret)
@@ -2244,15 +2620,7 @@ failed_init:
2244 if (fep->reg_phy) 2620 if (fep->reg_phy)
2245 regulator_disable(fep->reg_phy); 2621 regulator_disable(fep->reg_phy);
2246failed_regulator: 2622failed_regulator:
2247 if (fep->clk_ptp) 2623 fec_enet_clk_enable(ndev, false);
2248 clk_disable_unprepare(fep->clk_ptp);
2249failed_clk_ptp:
2250 if (fep->clk_enet_out)
2251 clk_disable_unprepare(fep->clk_enet_out);
2252failed_clk_enet_out:
2253 clk_disable_unprepare(fep->clk_ipg);
2254failed_clk_ipg:
2255 clk_disable_unprepare(fep->clk_ahb);
2256failed_clk: 2624failed_clk:
2257failed_ioremap: 2625failed_ioremap:
2258 free_netdev(ndev); 2626 free_netdev(ndev);
@@ -2272,14 +2640,9 @@ fec_drv_remove(struct platform_device *pdev)
2272 del_timer_sync(&fep->time_keep); 2640 del_timer_sync(&fep->time_keep);
2273 if (fep->reg_phy) 2641 if (fep->reg_phy)
2274 regulator_disable(fep->reg_phy); 2642 regulator_disable(fep->reg_phy);
2275 if (fep->clk_ptp)
2276 clk_disable_unprepare(fep->clk_ptp);
2277 if (fep->ptp_clock) 2643 if (fep->ptp_clock)
2278 ptp_clock_unregister(fep->ptp_clock); 2644 ptp_clock_unregister(fep->ptp_clock);
2279 if (fep->clk_enet_out) 2645 fec_enet_clk_enable(ndev, false);
2280 clk_disable_unprepare(fep->clk_enet_out);
2281 clk_disable_unprepare(fep->clk_ipg);
2282 clk_disable_unprepare(fep->clk_ahb);
2283 free_netdev(ndev); 2646 free_netdev(ndev);
2284 2647
2285 return 0; 2648 return 0;
@@ -2296,12 +2659,8 @@ fec_suspend(struct device *dev)
2296 fec_stop(ndev); 2659 fec_stop(ndev);
2297 netif_device_detach(ndev); 2660 netif_device_detach(ndev);
2298 } 2661 }
2299 if (fep->clk_ptp) 2662 fec_enet_clk_enable(ndev, false);
2300 clk_disable_unprepare(fep->clk_ptp); 2663 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2301 if (fep->clk_enet_out)
2302 clk_disable_unprepare(fep->clk_enet_out);
2303 clk_disable_unprepare(fep->clk_ipg);
2304 clk_disable_unprepare(fep->clk_ahb);
2305 2664
2306 if (fep->reg_phy) 2665 if (fep->reg_phy)
2307 regulator_disable(fep->reg_phy); 2666 regulator_disable(fep->reg_phy);
@@ -2322,25 +2681,10 @@ fec_resume(struct device *dev)
2322 return ret; 2681 return ret;
2323 } 2682 }
2324 2683
2325 ret = clk_prepare_enable(fep->clk_ahb); 2684 pinctrl_pm_select_default_state(&fep->pdev->dev);
2685 ret = fec_enet_clk_enable(ndev, true);
2326 if (ret) 2686 if (ret)
2327 goto failed_clk_ahb; 2687 goto failed_clk;
2328
2329 ret = clk_prepare_enable(fep->clk_ipg);
2330 if (ret)
2331 goto failed_clk_ipg;
2332
2333 if (fep->clk_enet_out) {
2334 ret = clk_prepare_enable(fep->clk_enet_out);
2335 if (ret)
2336 goto failed_clk_enet_out;
2337 }
2338
2339 if (fep->clk_ptp) {
2340 ret = clk_prepare_enable(fep->clk_ptp);
2341 if (ret)
2342 goto failed_clk_ptp;
2343 }
2344 2688
2345 if (netif_running(ndev)) { 2689 if (netif_running(ndev)) {
2346 fec_restart(ndev, fep->full_duplex); 2690 fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2693,7 @@ fec_resume(struct device *dev)
2349 2693
2350 return 0; 2694 return 0;
2351 2695
2352failed_clk_ptp: 2696failed_clk:
2353 if (fep->clk_enet_out)
2354 clk_disable_unprepare(fep->clk_enet_out);
2355failed_clk_enet_out:
2356 clk_disable_unprepare(fep->clk_ipg);
2357failed_clk_ipg:
2358 clk_disable_unprepare(fep->clk_ahb);
2359failed_clk_ahb:
2360 if (fep->reg_phy) 2697 if (fep->reg_phy)
2361 regulator_disable(fep->reg_phy); 2698 regulator_disable(fep->reg_phy);
2362 return ret; 2699 return ret;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc80db41d6b3..cfaf17b70f3f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -792,10 +792,6 @@ static int fs_init_phy(struct net_device *dev)
792 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, 792 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
793 iface); 793 iface);
794 if (!phydev) { 794 if (!phydev) {
795 phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
796 iface);
797 }
798 if (!phydev) {
799 dev_err(&dev->dev, "Could not attach to PHY\n"); 795 dev_err(&dev->dev, "Could not attach to PHY\n");
800 return -ENODEV; 796 return -ENODEV;
801 } 797 }
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
1029 fpi->use_napi = 1; 1025 fpi->use_napi = 1;
1030 fpi->napi_weight = 17; 1026 fpi->napi_weight = 17;
1031 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 1027 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
1032 if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link", 1028 if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
1033 NULL))) 1029 err = of_phy_register_fixed_link(ofdev->dev.of_node);
1034 goto out_free_fpi; 1030 if (err)
1031 goto out_free_fpi;
1032
1033 /* In the case of a fixed PHY, the DT node associated
1034 * to the PHY is the Ethernet MAC DT node.
1035 */
1036 fpi->phy_node = ofdev->dev.of_node;
1037 }
1035 1038
1036 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { 1039 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
1037 phy_connection_type = of_get_property(ofdev->dev.of_node, 1040 phy_connection_type = of_get_property(ofdev->dev.of_node,
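The fs_enet, gianfar, and ucc_geth hunks all switch from the removed of_phy_connect_fixed_link() to the same probe-time pattern: when no phy-handle exists but the node carries a fixed-link description, register a fixed PHY and reuse the MAC's own DT node as the PHY node for of_phy_connect(). A hedged kernel-style sketch of that shared pattern (the function name is hypothetical):

#include <linux/of.h>
#include <linux/of_mdio.h>

static int example_probe_phy(struct device_node *np,
			     struct device_node **phy_node)
{
	*phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!*phy_node && of_phy_is_fixed_link(np)) {
		int err = of_phy_register_fixed_link(np);

		if (err)
			return err;
		/* fixed PHY: the MAC DT node doubles as the PHY node */
		*phy_node = np;
	}
	return 0;
}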
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ee6ddbd4f252..a6cf40e62f3a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -889,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
889 889
890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
891 891
892 /* In the case of a fixed PHY, the DT node associated
893 * to the PHY is the Ethernet MAC DT node.
894 */
895 if (of_phy_is_fixed_link(np)) {
896 err = of_phy_register_fixed_link(np);
897 if (err)
898 goto err_grp_init;
899
900 priv->phy_node = np;
901 }
902
892 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 903 /* Find the TBI PHY. If it's not there, we don't support SGMII */
893 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 904 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
894 905
@@ -1231,7 +1242,7 @@ static void gfar_hw_init(struct gfar_private *priv)
1231 gfar_write_isrg(priv); 1242 gfar_write_isrg(priv);
1232} 1243}
1233 1244
1234static void __init gfar_init_addr_hash_table(struct gfar_private *priv) 1245static void gfar_init_addr_hash_table(struct gfar_private *priv)
1235{ 1246{
1236 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1247 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1237 1248
@@ -1373,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
1373 1384
1374 gfar_hw_init(priv); 1385 gfar_hw_init(priv);
1375 1386
1387 /* Carrier starts down, phylib will bring it up */
1388 netif_carrier_off(dev);
1389
1376 err = register_netdev(dev); 1390 err = register_netdev(dev);
1377 1391
1378 if (err) { 1392 if (err) {
@@ -1380,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
1380 goto register_fail; 1394 goto register_fail;
1381 } 1395 }
1382 1396
1383 /* Carrier starts down, phylib will bring it up */
1384 netif_carrier_off(dev);
1385
1386 device_init_wakeup(&dev->dev, 1397 device_init_wakeup(&dev->dev,
1387 priv->device_flags & 1398 priv->device_flags &
1388 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1399 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1660,9 +1671,6 @@ static int init_phy(struct net_device *dev)
1660 1671
1661 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1672 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1662 interface); 1673 interface);
1663 if (!priv->phydev)
1664 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1665 interface);
1666 if (!priv->phydev) { 1674 if (!priv->phydev) {
1667 dev_err(&dev->dev, "could not attach to PHY\n"); 1675 dev_err(&dev->dev, "could not attach to PHY\n");
1668 return -ENODEV; 1676 return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c8299c31b21f..fab39e295441 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
1728 1728
1729 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, 1729 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
1730 priv->phy_interface); 1730 priv->phy_interface);
1731 if (!phydev)
1732 phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1733 priv->phy_interface);
1734 if (!phydev) { 1731 if (!phydev) {
1735 dev_err(&dev->dev, "Could not attach to PHY\n"); 1732 dev_err(&dev->dev, "Could not attach to PHY\n");
1736 return -ENODEV; 1733 return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3790 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3787 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3791 3788
3792 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); 3789 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
3790 if (!ug_info->phy_node) {
3791 /* In the case of a fixed PHY, the DT node associated
3792 * to the PHY is the Ethernet MAC DT node.
3793 */
3794 if (of_phy_is_fixed_link(np)) {
3795 err = of_phy_register_fixed_link(np);
3796 if (err)
3797 return err;
3798 }
3799 ug_info->phy_node = np;
3800 }
3793 3801
3794 /* Find the TBI PHY node. If it's not there, we don't support SGMII */ 3802 /* Find the TBI PHY node. If it's not there, we don't support SGMII */
3795 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 3803 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 413329eff2ff..cc83350d56ba 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
417 417
418void uec_set_ethtool_ops(struct net_device *netdev) 418void uec_set_ethtool_ops(struct net_device *netdev)
419{ 419{
420 SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops); 420 netdev->ethtool_ops = &uec_ethtool_ops;
421} 421}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d449fcb90199..0c9d55c862ae 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -162,7 +162,9 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
162 162
163 /* Return all Fs if nothing was there */ 163 /* Return all Fs if nothing was there */
164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) { 164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
165 dev_err(&bus->dev, "MDIO read error\n"); 165 dev_err(&bus->dev,
166 "Error while reading PHY%d reg at %d.%d\n",
167 phy_id, dev_addr, regnum);
166 return 0xffff; 168 return 0xffff;
167 } 169 }
168 170
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 7becab1aa3e4..cfe7a7431730 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
256 dev->netdev_ops = &fjn_netdev_ops; 256 dev->netdev_ops = &fjn_netdev_ops;
257 dev->watchdog_timeo = TX_TIMEOUT; 257 dev->watchdog_timeo = TX_TIMEOUT;
258 258
259 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 259 dev->ethtool_ops = &netdev_ethtool_ops;
260 260
261 return fmvj18x_config(link); 261 return fmvj18x_config(link);
262} /* fmvj18x_attach */ 262} /* fmvj18x_attach */
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 000000000000..e9421731b05e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,27 @@
1#
2# HISILICON device configuration
3#
4
5config NET_VENDOR_HISILICON
6 bool "Hisilicon devices"
7 default y
8 depends on ARM
9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly affect the
15 kernel: saying N will just cause the configurator to skip all
16 the questions about Hisilicon devices. If you say Y, you will be asked
17 for your specific card in the following questions.
18
19if NET_VENDOR_HISILICON
20
21config HIX5HD2_GMAC
22 tristate "Hisilicon HIX5HD2 Family Network Device Support"
23 select PHYLIB
24 help
25 This selects the hix5hd2 mac family network device.
26
27endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 000000000000..9175e84622d4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the HISILICON network device drivers.
3#
4
5obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 000000000000..0ffdcd381fdd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1066 @@
1/* Copyright (c) 2014 Linaro Ltd.
2 * Copyright (c) 2014 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/module.h>
11#include <linux/interrupt.h>
12#include <linux/etherdevice.h>
13#include <linux/platform_device.h>
14#include <linux/of_net.h>
15#include <linux/of_mdio.h>
16#include <linux/clk.h>
17#include <linux/circ_buf.h>
18
19#define STATION_ADDR_LOW 0x0000
20#define STATION_ADDR_HIGH 0x0004
21#define MAC_DUPLEX_HALF_CTRL 0x0008
22#define MAX_FRM_SIZE 0x003c
23#define PORT_MODE 0x0040
24#define PORT_EN 0x0044
25#define BITS_TX_EN BIT(2)
26#define BITS_RX_EN BIT(1)
27#define REC_FILT_CONTROL 0x0064
28#define BIT_CRC_ERR_PASS BIT(5)
29#define BIT_PAUSE_FRM_PASS BIT(4)
30#define BIT_VLAN_DROP_EN BIT(3)
31#define BIT_BC_DROP_EN BIT(2)
32#define BIT_MC_MATCH_EN BIT(1)
33#define BIT_UC_MATCH_EN BIT(0)
34#define PORT_MC_ADDR_LOW 0x0068
35#define PORT_MC_ADDR_HIGH 0x006C
36#define CF_CRC_STRIP 0x01b0
37#define MODE_CHANGE_EN 0x01b4
38#define BIT_MODE_CHANGE_EN BIT(0)
39#define COL_SLOT_TIME 0x01c0
40#define RECV_CONTROL 0x01e0
41#define BIT_STRIP_PAD_EN BIT(3)
42#define BIT_RUNT_PKT_EN BIT(4)
43#define CONTROL_WORD 0x0214
44#define MDIO_SINGLE_CMD 0x03c0
45#define MDIO_SINGLE_DATA 0x03c4
46#define MDIO_CTRL 0x03cc
47#define MDIO_RDATA_STATUS 0x03d0
48
49#define MDIO_START BIT(20)
50#define MDIO_R_VALID BIT(0)
51#define MDIO_READ (BIT(17) | MDIO_START)
52#define MDIO_WRITE (BIT(16) | MDIO_START)
53
54#define RX_FQ_START_ADDR 0x0500
55#define RX_FQ_DEPTH 0x0504
56#define RX_FQ_WR_ADDR 0x0508
57#define RX_FQ_RD_ADDR 0x050c
58#define RX_FQ_VLDDESC_CNT 0x0510
59#define RX_FQ_ALEMPTY_TH 0x0514
60#define RX_FQ_REG_EN 0x0518
61#define BITS_RX_FQ_START_ADDR_EN BIT(2)
62#define BITS_RX_FQ_DEPTH_EN BIT(1)
63#define BITS_RX_FQ_RD_ADDR_EN BIT(0)
64#define RX_FQ_ALFULL_TH 0x051c
65#define RX_BQ_START_ADDR 0x0520
66#define RX_BQ_DEPTH 0x0524
67#define RX_BQ_WR_ADDR 0x0528
68#define RX_BQ_RD_ADDR 0x052c
69#define RX_BQ_FREE_DESC_CNT 0x0530
70#define RX_BQ_ALEMPTY_TH 0x0534
71#define RX_BQ_REG_EN 0x0538
72#define BITS_RX_BQ_START_ADDR_EN BIT(2)
73#define BITS_RX_BQ_DEPTH_EN BIT(1)
74#define BITS_RX_BQ_WR_ADDR_EN BIT(0)
75#define RX_BQ_ALFULL_TH 0x053c
76#define TX_BQ_START_ADDR 0x0580
77#define TX_BQ_DEPTH 0x0584
78#define TX_BQ_WR_ADDR 0x0588
79#define TX_BQ_RD_ADDR 0x058c
80#define TX_BQ_VLDDESC_CNT 0x0590
81#define TX_BQ_ALEMPTY_TH 0x0594
82#define TX_BQ_REG_EN 0x0598
83#define BITS_TX_BQ_START_ADDR_EN BIT(2)
84#define BITS_TX_BQ_DEPTH_EN BIT(1)
85#define BITS_TX_BQ_RD_ADDR_EN BIT(0)
86#define TX_BQ_ALFULL_TH 0x059c
87#define TX_RQ_START_ADDR 0x05a0
88#define TX_RQ_DEPTH 0x05a4
89#define TX_RQ_WR_ADDR 0x05a8
90#define TX_RQ_RD_ADDR 0x05ac
91#define TX_RQ_FREE_DESC_CNT 0x05b0
92#define TX_RQ_ALEMPTY_TH 0x05b4
93#define TX_RQ_REG_EN 0x05b8
94#define BITS_TX_RQ_START_ADDR_EN BIT(2)
95#define BITS_TX_RQ_DEPTH_EN BIT(1)
96#define BITS_TX_RQ_WR_ADDR_EN BIT(0)
97#define TX_RQ_ALFULL_TH 0x05bc
98#define RAW_PMU_INT 0x05c0
99#define ENA_PMU_INT 0x05c4
100#define STATUS_PMU_INT 0x05c8
101#define MAC_FIFO_ERR_IN BIT(30)
102#define TX_RQ_IN_TIMEOUT_INT BIT(29)
103#define RX_BQ_IN_TIMEOUT_INT BIT(28)
104#define TXOUTCFF_FULL_INT BIT(27)
105#define TXOUTCFF_EMPTY_INT BIT(26)
106#define TXCFF_FULL_INT BIT(25)
107#define TXCFF_EMPTY_INT BIT(24)
108#define RXOUTCFF_FULL_INT BIT(23)
109#define RXOUTCFF_EMPTY_INT BIT(22)
110#define RXCFF_FULL_INT BIT(21)
111#define RXCFF_EMPTY_INT BIT(20)
112#define TX_RQ_IN_INT BIT(19)
113#define TX_BQ_OUT_INT BIT(18)
114#define RX_BQ_IN_INT BIT(17)
115#define RX_FQ_OUT_INT BIT(16)
116#define TX_RQ_EMPTY_INT BIT(15)
117#define TX_RQ_FULL_INT BIT(14)
118#define TX_RQ_ALEMPTY_INT BIT(13)
119#define TX_RQ_ALFULL_INT BIT(12)
120#define TX_BQ_EMPTY_INT BIT(11)
121#define TX_BQ_FULL_INT BIT(10)
122#define TX_BQ_ALEMPTY_INT BIT(9)
123#define TX_BQ_ALFULL_INT BIT(8)
124#define RX_BQ_EMPTY_INT BIT(7)
125#define RX_BQ_FULL_INT BIT(6)
126#define RX_BQ_ALEMPTY_INT BIT(5)
127#define RX_BQ_ALFULL_INT BIT(4)
128#define RX_FQ_EMPTY_INT BIT(3)
129#define RX_FQ_FULL_INT BIT(2)
130#define RX_FQ_ALEMPTY_INT BIT(1)
131#define RX_FQ_ALFULL_INT BIT(0)
132
133#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
134 TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
135
136#define DESC_WR_RD_ENA 0x05cc
137#define IN_QUEUE_TH 0x05d8
138#define OUT_QUEUE_TH 0x05dc
139#define QUEUE_TX_BQ_SHIFT 16
140#define RX_BQ_IN_TIMEOUT_TH 0x05e0
141#define TX_RQ_IN_TIMEOUT_TH 0x05e4
142#define STOP_CMD 0x05e8
143#define BITS_TX_STOP BIT(1)
144#define BITS_RX_STOP BIT(0)
145#define FLUSH_CMD 0x05eC
146#define BITS_TX_FLUSH_CMD BIT(5)
147#define BITS_RX_FLUSH_CMD BIT(4)
148#define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
149#define BITS_TX_FLUSH_FLAG_UP BIT(2)
150#define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
151#define BITS_RX_FLUSH_FLAG_UP BIT(0)
152#define RX_CFF_NUM_REG 0x05f0
153#define PMU_FSM_REG 0x05f8
154#define RX_FIFO_PKT_IN_NUM 0x05fc
155#define RX_FIFO_PKT_OUT_NUM 0x0600
156
157#define RGMII_SPEED_1000 0x2c
158#define RGMII_SPEED_100 0x2f
159#define RGMII_SPEED_10 0x2d
160#define MII_SPEED_100 0x0f
161#define MII_SPEED_10 0x0d
162#define GMAC_SPEED_1000 0x05
163#define GMAC_SPEED_100 0x01
164#define GMAC_SPEED_10 0x00
165#define GMAC_FULL_DUPLEX BIT(4)
166
167#define RX_BQ_INT_THRESHOLD 0x01
168#define TX_RQ_INT_THRESHOLD 0x01
169#define RX_BQ_IN_TIMEOUT 0x10000
170#define TX_RQ_IN_TIMEOUT 0x50000
171
172#define MAC_MAX_FRAME_SIZE 1600
173#define DESC_SIZE 32
174#define RX_DESC_NUM 1024
175#define TX_DESC_NUM 1024
176
177#define DESC_VLD_FREE 0
178#define DESC_VLD_BUSY 0x80000000
179#define DESC_FL_MID 0
180#define DESC_FL_LAST 0x20000000
181#define DESC_FL_FIRST 0x40000000
182#define DESC_FL_FULL 0x60000000
183#define DESC_DATA_LEN_OFF 16
184#define DESC_BUFF_LEN_OFF 0
185#define DESC_DATA_MASK 0x7ff
186
187/* DMA descriptor ring helpers */
188#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
189#define dma_cnt(n) ((n) >> 5)
190#define dma_byte(n) ((n) << 5)
191
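These ring helpers exploit the 32-byte descriptor size: the hardware's byte-offset queue registers convert to descriptor indices with a shift right by 5, back with a shift left, and indices wrap a power-of-two ring with a mask. A standalone check:

#include <stdio.h>

#define DESC_SIZE 32
#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
#define dma_cnt(n) ((n) >> 5)
#define dma_byte(n) ((n) << 5)

int main(void)
{
	unsigned int byte_off = 13 * DESC_SIZE;	/* as read from RX_FQ_WR_ADDR */
	unsigned int idx = dma_cnt(byte_off);	/* 13 */

	idx = dma_ring_incr(idx, 1024);		/* 14; index 1023 wraps to 0 */
	printf("idx=%u byte=%u\n", idx, dma_byte(idx));	/* idx=14 byte=448 */
	return 0;
}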
192struct hix5hd2_desc {
193 __le32 buff_addr;
194 __le32 cmd;
195} __aligned(32);
196
197struct hix5hd2_desc_sw {
198 struct hix5hd2_desc *desc;
199 dma_addr_t phys_addr;
200 unsigned int count;
201 unsigned int size;
202};
203
204#define QUEUE_NUMS 4
205struct hix5hd2_priv {
206 struct hix5hd2_desc_sw pool[QUEUE_NUMS];
207#define rx_fq pool[0]
208#define rx_bq pool[1]
209#define tx_bq pool[2]
210#define tx_rq pool[3]
211
212 void __iomem *base;
213 void __iomem *ctrl_base;
214
215 struct sk_buff *tx_skb[TX_DESC_NUM];
216 struct sk_buff *rx_skb[RX_DESC_NUM];
217
218 struct device *dev;
219 struct net_device *netdev;
220
221 struct phy_device *phy;
222 struct device_node *phy_node;
223 phy_interface_t phy_mode;
224
225 unsigned int speed;
226 unsigned int duplex;
227
228 struct clk *clk;
229 struct mii_bus *bus;
230 struct napi_struct napi;
231 struct work_struct tx_timeout_task;
232};
233
234static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
235{
236 struct hix5hd2_priv *priv = netdev_priv(dev);
237 u32 val;
238
239 priv->speed = speed;
240 priv->duplex = duplex;
241
242 switch (priv->phy_mode) {
243 case PHY_INTERFACE_MODE_RGMII:
244 if (speed == SPEED_1000)
245 val = RGMII_SPEED_1000;
246 else if (speed == SPEED_100)
247 val = RGMII_SPEED_100;
248 else
249 val = RGMII_SPEED_10;
250 break;
251 case PHY_INTERFACE_MODE_MII:
252 if (speed == SPEED_100)
253 val = MII_SPEED_100;
254 else
255 val = MII_SPEED_10;
256 break;
257 default:
258 netdev_warn(dev, "not supported mode\n");
259 val = MII_SPEED_10;
260 break;
261 }
262
263 if (duplex)
264 val |= GMAC_FULL_DUPLEX;
265 writel_relaxed(val, priv->ctrl_base);
266
267 writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
268 if (speed == SPEED_1000)
269 val = GMAC_SPEED_1000;
270 else if (speed == SPEED_100)
271 val = GMAC_SPEED_100;
272 else
273 val = GMAC_SPEED_10;
274 writel_relaxed(val, priv->base + PORT_MODE);
275 writel_relaxed(0, priv->base + MODE_CHANGE_EN);
276 writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
277}
278
279static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
280{
281 writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
282 writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
283 writel_relaxed(0, priv->base + RX_FQ_REG_EN);
284
285 writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
286 writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
287 writel_relaxed(0, priv->base + RX_BQ_REG_EN);
288
289 writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
290 writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
291 writel_relaxed(0, priv->base + TX_BQ_REG_EN);
292
293 writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
294 writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
295 writel_relaxed(0, priv->base + TX_RQ_REG_EN);
296}
297
298static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
299{
300 writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
301 writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
302 writel_relaxed(0, priv->base + RX_FQ_REG_EN);
303}
304
305static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
306{
307 writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
308 writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
309 writel_relaxed(0, priv->base + RX_BQ_REG_EN);
310}
311
312static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
313{
314 writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
315 writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
316 writel_relaxed(0, priv->base + TX_BQ_REG_EN);
317}
318
319static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
320{
321 writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
322 writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
323 writel_relaxed(0, priv->base + TX_RQ_REG_EN);
324}
325
326static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
327{
328 hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
329 hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
330 hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
331 hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
332}
333
334static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
335{
336 u32 val;
337
338 /* disable and clear all interrupts */
339 writel_relaxed(0, priv->base + ENA_PMU_INT);
340 writel_relaxed(~0, priv->base + RAW_PMU_INT);
341
342 writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
343 writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
344 writel_relaxed(0, priv->base + COL_SLOT_TIME);
345
346 val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
347 writel_relaxed(val, priv->base + IN_QUEUE_TH);
348
349 writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
350 writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);
351
352 hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
353 hix5hd2_set_desc_addr(priv);
354}
355
356static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
357{
358 writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
359}
360
361static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
362{
363 writel_relaxed(0, priv->base + ENA_PMU_INT);
364}
365
366static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
367{
368 writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
369 writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
370}
371
372static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
373{
374 writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
375 writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
376}
377
378static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
379{
380 struct hix5hd2_priv *priv = netdev_priv(dev);
381 unsigned char *mac = dev->dev_addr;
382 u32 val;
383
384 val = mac[1] | (mac[0] << 8);
385 writel_relaxed(val, priv->base + STATION_ADDR_HIGH);
386
387 val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
388 writel_relaxed(val, priv->base + STATION_ADDR_LOW);
389}
390
391static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
392{
393 int ret;
394
395 ret = eth_mac_addr(dev, p);
396 if (!ret)
397 hix5hd2_hw_set_mac_addr(dev);
398
399 return ret;
400}
401
402static void hix5hd2_adjust_link(struct net_device *dev)
403{
404 struct hix5hd2_priv *priv = netdev_priv(dev);
405 struct phy_device *phy = priv->phy;
406
407 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
408 hix5hd2_config_port(dev, phy->speed, phy->duplex);
409 phy_print_status(phy);
410 }
411}
412
413static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
414{
415 struct hix5hd2_desc *desc;
416 struct sk_buff *skb;
417 u32 start, end, num, pos, i;
418 u32 len = MAC_MAX_FRAME_SIZE;
419 dma_addr_t addr;
420
421 /* software write pointer */
422 start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
423 /* logic read pointer */
424 end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
425 num = CIRC_SPACE(start, end, RX_DESC_NUM);
426
427 for (i = 0, pos = start; i < num; i++) {
428 if (priv->rx_skb[pos]) {
429 break;
430 } else {
431 skb = netdev_alloc_skb_ip_align(priv->netdev, len);
432 if (unlikely(skb == NULL))
433 break;
434 }
435
436 addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
437 if (dma_mapping_error(priv->dev, addr)) {
438 dev_kfree_skb_any(skb);
439 break;
440 }
441
442 desc = priv->rx_fq.desc + pos;
443 desc->buff_addr = cpu_to_le32(addr);
444 priv->rx_skb[pos] = skb;
445 desc->cmd = cpu_to_le32(DESC_VLD_FREE |
446 (len - 1) << DESC_BUFF_LEN_OFF);
447 pos = dma_ring_incr(pos, RX_DESC_NUM);
448 }
449
450 /* ensure desc updated */
451 wmb();
452
453 if (pos != start)
454 writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
455}
456
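The refill path above uses CIRC_SPACE() against the free-queue pointers to see how many empty slots software may still hand to hardware, while the receive path uses CIRC_CNT() to see how many filled buffers hardware has returned. A standalone sketch with the macro definitions restated from linux/circ_buf.h so it runs on its own:

#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int size = 1024;	/* RX_DESC_NUM, a power of two */
	unsigned int wr = 10, rd = 1000;

	/* refill: free-queue slots software may still fill */
	printf("space=%u\n", CIRC_SPACE(wr, rd, size));	/* 989 */
	/* receive: completed buffers hardware handed back */
	printf("cnt=%u\n", CIRC_CNT(wr, rd, size));	/* 34  */
	return 0;
}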
457static int hix5hd2_rx(struct net_device *dev, int limit)
458{
459 struct hix5hd2_priv *priv = netdev_priv(dev);
460 struct sk_buff *skb;
461 struct hix5hd2_desc *desc;
462 dma_addr_t addr;
463 u32 start, end, num, pos, i, len;
464
465 /* software read pointer */
466 start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
467 /* logic write pointer */
468 end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
469 num = CIRC_CNT(end, start, RX_DESC_NUM);
470 if (num > limit)
471 num = limit;
472
473 /* ensure get updated desc */
474 rmb();
475 for (i = 0, pos = start; i < num; i++) {
476 skb = priv->rx_skb[pos];
477 if (unlikely(!skb)) {
478 netdev_err(dev, "inconsistent rx_skb\n");
479 break;
480 }
481 priv->rx_skb[pos] = NULL;
482
483 desc = priv->rx_bq.desc + pos;
484 len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
485 DESC_DATA_MASK;
486 addr = le32_to_cpu(desc->buff_addr);
487 dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
488 DMA_FROM_DEVICE);
489
490 skb_put(skb, len);
491 if (skb->len > MAC_MAX_FRAME_SIZE) {
492 netdev_err(dev, "rcv len err, len = %d\n", skb->len);
493 dev->stats.rx_errors++;
494 dev->stats.rx_length_errors++;
495 dev_kfree_skb_any(skb);
496 goto next;
497 }
498
499 skb->protocol = eth_type_trans(skb, dev);
500 napi_gro_receive(&priv->napi, skb);
501 dev->stats.rx_packets++;
502 dev->stats.rx_bytes += skb->len;
503 dev->last_rx = jiffies;
504next:
505 pos = dma_ring_incr(pos, RX_DESC_NUM);
506 }
507
508 if (pos != start)
509 writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);
510
511 hix5hd2_rx_refill(priv);
512
513 return num;
514}
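/* Receive flow, as the two rings above suggest: software hands empty
 * buffers to the hardware through the RX free queue (RX_FQ, replenished
 * in hix5hd2_rx_refill()), and the hardware returns filled buffers
 * through the RX back queue (RX_BQ, drained here). Each queue pairs a
 * software pointer with a hardware ("logic") pointer, and
 * CIRC_CNT()/CIRC_SPACE() give the number of entries to process.
 */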
515
516static void hix5hd2_xmit_reclaim(struct net_device *dev)
517{
518 struct sk_buff *skb;
519 struct hix5hd2_desc *desc;
520 struct hix5hd2_priv *priv = netdev_priv(dev);
521 unsigned int bytes_compl = 0, pkts_compl = 0;
522 u32 start, end, num, pos, i;
523 dma_addr_t addr;
524
525 netif_tx_lock(dev);
526
527 /* software read */
528 start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
529 /* logic write */
530 end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
531 num = CIRC_CNT(end, start, TX_DESC_NUM);
532
533 for (i = 0, pos = start; i < num; i++) {
534 skb = priv->tx_skb[pos];
535 if (unlikely(!skb)) {
536 netdev_err(dev, "inconsistent tx_skb\n");
537 break;
538 }
539
540 pkts_compl++;
541 bytes_compl += skb->len;
542 desc = priv->tx_rq.desc + pos;
543 addr = le32_to_cpu(desc->buff_addr);
544 dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
545 priv->tx_skb[pos] = NULL;
546 dev_consume_skb_any(skb);
547 pos = dma_ring_incr(pos, TX_DESC_NUM);
548 }
549
550 if (pos != start)
551 writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);
552
553 netif_tx_unlock(dev);
554
555 if (pkts_compl || bytes_compl)
556 netdev_completed_queue(dev, pkts_compl, bytes_compl);
557
558 if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
559 netif_wake_queue(priv->netdev);
560}
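/* Note: netdev_sent_queue() in hix5hd2_net_xmit() and
 * netdev_completed_queue() here are the two halves of the Byte Queue
 * Limits (BQL) accounting, which lets the stack bound how many bytes sit
 * unreclaimed in the hardware TX queue.
 */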
561
562static int hix5hd2_poll(struct napi_struct *napi, int budget)
563{
564 struct hix5hd2_priv *priv = container_of(napi,
565 struct hix5hd2_priv, napi);
566 struct net_device *dev = priv->netdev;
567 int work_done = 0, task = budget;
568 int ints, num;
569
570 do {
571 hix5hd2_xmit_reclaim(dev);
572 num = hix5hd2_rx(dev, task);
573 work_done += num;
574 task -= num;
575 if ((work_done >= budget) || (num == 0))
576 break;
577
578 ints = readl_relaxed(priv->base + RAW_PMU_INT);
579 writel_relaxed(ints, priv->base + RAW_PMU_INT);
580 } while (ints & DEF_INT_MASK);
581
582 if (work_done < budget) {
583 napi_complete(napi);
584 hix5hd2_irq_enable(priv);
585 }
586
587 return work_done;
588}
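/* NAPI completion pattern: after a pass that neither exhausted the
 * budget nor ran dry, the raw interrupt status is read back and
 * acknowledged, and polling repeats while new events keep arriving; the
 * hardware interrupt is only re-enabled after napi_complete(), once a
 * pass finishes under budget.
 */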
589
590static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
591{
592 struct net_device *dev = (struct net_device *)dev_id;
593 struct hix5hd2_priv *priv = netdev_priv(dev);
594 int ints = readl_relaxed(priv->base + RAW_PMU_INT);
595
596 writel_relaxed(ints, priv->base + RAW_PMU_INT);
597 if (likely(ints & DEF_INT_MASK)) {
598 hix5hd2_irq_disable(priv);
599 napi_schedule(&priv->napi);
600 }
601
602 return IRQ_HANDLED;
603}
604
605static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
606{
607 struct hix5hd2_priv *priv = netdev_priv(dev);
608 struct hix5hd2_desc *desc;
609 dma_addr_t addr;
610 u32 pos;
611
612 /* software write pointer */
613 pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
614 if (unlikely(priv->tx_skb[pos])) {
615 dev->stats.tx_dropped++;
616 dev->stats.tx_fifo_errors++;
617 netif_stop_queue(dev);
618 return NETDEV_TX_BUSY;
619 }
620
621 addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
622 if (dma_mapping_error(priv->dev, addr)) {
623 dev_kfree_skb_any(skb);
624 return NETDEV_TX_OK;
625 }
626
627 desc = priv->tx_bq.desc + pos;
628 desc->buff_addr = cpu_to_le32(addr);
629 priv->tx_skb[pos] = skb;
630 desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
631 (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
632 (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
633
634 /* ensure desc updated */
635 wmb();
636
637 pos = dma_ring_incr(pos, TX_DESC_NUM);
638 writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
639
640 dev->trans_start = jiffies;
641 dev->stats.tx_packets++;
642 dev->stats.tx_bytes += skb->len;
643 netdev_sent_queue(dev, skb->len);
644
645 return NETDEV_TX_OK;
646}
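/* Note: this xmit path maps one linear skb to exactly one descriptor
 * (no NETIF_F_SG is advertised at probe time), and a still-occupied
 * tx_skb[pos] slot means the ring is full: the queue is stopped and
 * NETDEV_TX_BUSY makes the stack requeue the packet until
 * hix5hd2_xmit_reclaim() frees a slot and wakes the queue.
 */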
647
648static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
649{
650 struct hix5hd2_desc *desc;
651 dma_addr_t addr;
652 int i;
653
654 for (i = 0; i < RX_DESC_NUM; i++) {
655 struct sk_buff *skb = priv->rx_skb[i];
656 if (skb == NULL)
657 continue;
658
659 desc = priv->rx_fq.desc + i;
660 addr = le32_to_cpu(desc->buff_addr);
661 dma_unmap_single(priv->dev, addr,
662 MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
663 dev_kfree_skb_any(skb);
664 priv->rx_skb[i] = NULL;
665 }
666
667 for (i = 0; i < TX_DESC_NUM; i++) {
668 struct sk_buff *skb = priv->tx_skb[i];
669 if (skb == NULL)
670 continue;
671
672 desc = priv->tx_rq.desc + i;
673 addr = le32_to_cpu(desc->buff_addr);
674 dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
675 dev_kfree_skb_any(skb);
676 priv->tx_skb[i] = NULL;
677 }
678}
679
680static int hix5hd2_net_open(struct net_device *dev)
681{
682 struct hix5hd2_priv *priv = netdev_priv(dev);
683 int ret;
684
685 ret = clk_prepare_enable(priv->clk);
686 if (ret < 0) {
687 netdev_err(dev, "failed to enable clk %d\n", ret);
688 return ret;
689 }
690
691 priv->phy = of_phy_connect(dev, priv->phy_node,
692 &hix5hd2_adjust_link, 0, priv->phy_mode);
693 if (!priv->phy)
694 return -ENODEV;
695
696 phy_start(priv->phy);
697 hix5hd2_hw_init(priv);
698 hix5hd2_rx_refill(priv);
699
700 netdev_reset_queue(dev);
701 netif_start_queue(dev);
702 napi_enable(&priv->napi);
703
704 hix5hd2_port_enable(priv);
705 hix5hd2_irq_enable(priv);
706
707 return 0;
708}
709
710static int hix5hd2_net_close(struct net_device *dev)
711{
712 struct hix5hd2_priv *priv = netdev_priv(dev);
713
714 hix5hd2_port_disable(priv);
715 hix5hd2_irq_disable(priv);
716 napi_disable(&priv->napi);
717 netif_stop_queue(dev);
718 hix5hd2_free_dma_desc_rings(priv);
719
720 if (priv->phy) {
721 phy_stop(priv->phy);
722 phy_disconnect(priv->phy);
723 }
724
725 clk_disable_unprepare(priv->clk);
726
727 return 0;
728}
729
730static void hix5hd2_tx_timeout_task(struct work_struct *work)
731{
732 struct hix5hd2_priv *priv;
733
734 priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
735 hix5hd2_net_close(priv->netdev);
736 hix5hd2_net_open(priv->netdev);
737}
738
739static void hix5hd2_net_timeout(struct net_device *dev)
740{
741 struct hix5hd2_priv *priv = netdev_priv(dev);
742
743 schedule_work(&priv->tx_timeout_task);
744}
745
746static const struct net_device_ops hix5hd2_netdev_ops = {
747 .ndo_open = hix5hd2_net_open,
748 .ndo_stop = hix5hd2_net_close,
749 .ndo_start_xmit = hix5hd2_net_xmit,
750 .ndo_tx_timeout = hix5hd2_net_timeout,
751 .ndo_set_mac_address = hix5hd2_net_set_mac_address,
752};
753
754static int hix5hd2_get_settings(struct net_device *net_dev,
755 struct ethtool_cmd *cmd)
756{
757 struct hix5hd2_priv *priv = netdev_priv(net_dev);
758
759 if (!priv->phy)
760 return -ENODEV;
761
762 return phy_ethtool_gset(priv->phy, cmd);
763}
764
765static int hix5hd2_set_settings(struct net_device *net_dev,
766 struct ethtool_cmd *cmd)
767{
768 struct hix5hd2_priv *priv = netdev_priv(net_dev);
769
770 if (!priv->phy)
771 return -ENODEV;
772
773 return phy_ethtool_sset(priv->phy, cmd);
774}
775
776static struct ethtool_ops hix5hd2_ethtools_ops = {
777 .get_link = ethtool_op_get_link,
778 .get_settings = hix5hd2_get_settings,
779 .set_settings = hix5hd2_set_settings,
780};
781
782static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
783{
784 struct hix5hd2_priv *priv = bus->priv;
785 void __iomem *base = priv->base;
786 int i, timeout = 10000;
787
788 for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
789 if (i == timeout)
790 return -ETIMEDOUT;
791 usleep_range(10, 20);
792 }
793
794 return 0;
795}
796
797static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
798{
799 struct hix5hd2_priv *priv = bus->priv;
800 void __iomem *base = priv->base;
801 int val, ret;
802
803 ret = hix5hd2_mdio_wait_ready(bus);
804 if (ret < 0)
805 goto out;
806
807 writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
808 ret = hix5hd2_mdio_wait_ready(bus);
809 if (ret < 0)
810 goto out;
811
812 val = readl_relaxed(base + MDIO_RDATA_STATUS);
813 if (val & MDIO_R_VALID) {
814 dev_err(bus->parent, "SMI bus read not valid\n");
815 ret = -ENODEV;
816 goto out;
817 }
818
819 val = readl_relaxed(priv->base + MDIO_SINGLE_DATA);
820 ret = (val >> 16) & 0xFFFF;
821out:
822 return ret;
823}
824
825static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
826{
827 struct hix5hd2_priv *priv = bus->priv;
828 void __iomem *base = priv->base;
829 int ret;
830
831 ret = hix5hd2_mdio_wait_ready(bus);
832 if (ret < 0)
833 goto out;
834
835 writel_relaxed(val, base + MDIO_SINGLE_DATA);
836 writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
837 ret = hix5hd2_mdio_wait_ready(bus);
838out:
839 return ret;
840}
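/* MDIO command layout example, as encoded above: reading register 3 of
 * the PHY at address 1 writes MDIO_READ | 1 << 8 | 3 to MDIO_SINGLE_CMD,
 * i.e. the PHY address sits at bit 8 and up with the register number in
 * the low bits; the MDIO_START bit polled by hix5hd2_mdio_wait_ready()
 * clears when the bus transaction completes.
 */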
841
842static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
843{
844 int i;
845
846 for (i = 0; i < QUEUE_NUMS; i++) {
847 if (priv->pool[i].desc) {
848 dma_free_coherent(priv->dev, priv->pool[i].size,
849 priv->pool[i].desc,
850 priv->pool[i].phys_addr);
851 priv->pool[i].desc = NULL;
852 }
853 }
854}
855
856static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
857{
858 struct device *dev = priv->dev;
859 struct hix5hd2_desc *virt_addr;
860 dma_addr_t phys_addr;
861 int size, i;
862
863 priv->rx_fq.count = RX_DESC_NUM;
864 priv->rx_bq.count = RX_DESC_NUM;
865 priv->tx_bq.count = TX_DESC_NUM;
866 priv->tx_rq.count = TX_DESC_NUM;
867
868 for (i = 0; i < QUEUE_NUMS; i++) {
869 size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
870 virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
871 GFP_KERNEL);
872 if (virt_addr == NULL)
873 goto error_free_pool;
874
875 memset(virt_addr, 0, size);
876 priv->pool[i].size = size;
877 priv->pool[i].desc = virt_addr;
878 priv->pool[i].phys_addr = phys_addr;
879 }
880 return 0;
881
882error_free_pool:
883 hix5hd2_destroy_hw_desc_queue(priv);
884
885 return -ENOMEM;
886}
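/* A minor simplification (assuming no semantic difference is intended):
 * the dma_alloc_coherent() + memset() pair above could be collapsed into
 * a single dma_zalloc_coherent() call, which returns zeroed memory.
 */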
887
888static int hix5hd2_dev_probe(struct platform_device *pdev)
889{
890 struct device *dev = &pdev->dev;
891 struct device_node *node = dev->of_node;
892 struct net_device *ndev;
893 struct hix5hd2_priv *priv;
894 struct resource *res;
895 struct mii_bus *bus;
896 const char *mac_addr;
897 int ret;
898
899 ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
900 if (!ndev)
901 return -ENOMEM;
902
903 platform_set_drvdata(pdev, ndev);
904
905 priv = netdev_priv(ndev);
906 priv->dev = dev;
907 priv->netdev = ndev;
908
909 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
910 priv->base = devm_ioremap_resource(dev, res);
911 if (IS_ERR(priv->base)) {
912 ret = PTR_ERR(priv->base);
913 goto out_free_netdev;
914 }
915
916 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
917 priv->ctrl_base = devm_ioremap_resource(dev, res);
918 if (IS_ERR(priv->ctrl_base)) {
919 ret = PTR_ERR(priv->ctrl_base);
920 goto out_free_netdev;
921 }
922
923 priv->clk = devm_clk_get(&pdev->dev, NULL);
924 if (IS_ERR(priv->clk)) {
925 netdev_err(ndev, "failed to get clk\n");
926 ret = -ENODEV;
927 goto out_free_netdev;
928 }
929
930 ret = clk_prepare_enable(priv->clk);
931 if (ret < 0) {
932 netdev_err(ndev, "failed to enable clk %d\n", ret);
933 goto out_free_netdev;
934 }
935
936 bus = mdiobus_alloc();
937 if (bus == NULL) {
938 ret = -ENOMEM;
939 goto out_free_netdev;
940 }
941
942 bus->priv = priv;
943 bus->name = "hix5hd2_mii_bus";
944 bus->read = hix5hd2_mdio_read;
945 bus->write = hix5hd2_mdio_write;
946 bus->parent = &pdev->dev;
947 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
948 priv->bus = bus;
949
950 ret = of_mdiobus_register(bus, node);
951 if (ret)
952 goto err_free_mdio;
953
954 priv->phy_mode = of_get_phy_mode(node);
955 if (priv->phy_mode < 0) {
956 netdev_err(ndev, "failed to find phy-mode\n");
957 ret = -EINVAL;
958 goto err_mdiobus;
959 }
960
961 priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
962 if (!priv->phy_node) {
963 netdev_err(ndev, "failed to find phy-handle\n");
964 ret = -EINVAL;
965 goto err_mdiobus;
966 }
967
968 ndev->irq = platform_get_irq(pdev, 0);
969 if (ndev->irq <= 0) {
970 netdev_err(ndev, "No irq resource\n");
971 ret = -EINVAL;
972 goto out_phy_node;
973 }
974
975 ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
976 0, pdev->name, ndev);
977 if (ret) {
978 netdev_err(ndev, "devm_request_irq failed\n");
979 goto out_phy_node;
980 }
981
982 mac_addr = of_get_mac_address(node);
983 if (mac_addr)
984 ether_addr_copy(ndev->dev_addr, mac_addr);
985 if (!is_valid_ether_addr(ndev->dev_addr)) {
986 eth_hw_addr_random(ndev);
987 netdev_warn(ndev, "using random MAC address %pM\n",
988 ndev->dev_addr);
989 }
990
991 INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
992 ndev->watchdog_timeo = 6 * HZ;
993 ndev->priv_flags |= IFF_UNICAST_FLT;
994 ndev->netdev_ops = &hix5hd2_netdev_ops;
995 ndev->ethtool_ops = &hix5hd2_ethtools_ops;
996 SET_NETDEV_DEV(ndev, dev);
997
998 ret = hix5hd2_init_hw_desc_queue(priv);
999 if (ret)
1000 goto out_phy_node;
1001
1002 netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
1003 ret = register_netdev(priv->netdev);
1004 if (ret) {
1005 netdev_err(ndev, "register_netdev failed!");
1006 goto out_destroy_queue;
1007 }
1008
1009 clk_disable_unprepare(priv->clk);
1010
1011 return ret;
1012
1013out_destroy_queue:
1014 netif_napi_del(&priv->napi);
1015 hix5hd2_destroy_hw_desc_queue(priv);
1016out_phy_node:
1017 of_node_put(priv->phy_node);
1018err_mdiobus:
1019 mdiobus_unregister(bus);
1020err_free_mdio:
1021 mdiobus_free(bus);
1022out_free_netdev:
1023 free_netdev(ndev);
1024
1025 return ret;
1026}
1027
1028static int hix5hd2_dev_remove(struct platform_device *pdev)
1029{
1030 struct net_device *ndev = platform_get_drvdata(pdev);
1031 struct hix5hd2_priv *priv = netdev_priv(ndev);
1032
1033 netif_napi_del(&priv->napi);
1034 unregister_netdev(ndev);
1035 mdiobus_unregister(priv->bus);
1036 mdiobus_free(priv->bus);
1037
1038 hix5hd2_destroy_hw_desc_queue(priv);
1039 of_node_put(priv->phy_node);
1040 cancel_work_sync(&priv->tx_timeout_task);
1041 free_netdev(ndev);
1042
1043 return 0;
1044}
1045
1046static const struct of_device_id hix5hd2_of_match[] = {
1047 {.compatible = "hisilicon,hix5hd2-gmac",},
1048 {},
1049};
1050
1051MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
1052
1053static struct platform_driver hix5hd2_dev_driver = {
1054 .driver = {
1055 .name = "hix5hd2-gmac",
1056 .of_match_table = hix5hd2_of_match,
1057 },
1058 .probe = hix5hd2_dev_probe,
1059 .remove = hix5hd2_dev_remove,
1060};
1061
1062module_platform_driver(hix5hd2_dev_driver);
1063
1064MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
1065MODULE_LICENSE("GPL v2");
1066MODULE_ALIAS("platform:hix5hd2-gmac");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 95837b99a464..85a3866459cf 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -63,8 +63,8 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		cmd->duplex = port->full_duplex == 1 ?
 			      DUPLEX_FULL : DUPLEX_HALF;
 	} else {
-		speed = ~0;
-		cmd->duplex = -1;
+		speed = SPEED_UNKNOWN;
+		cmd->duplex = DUPLEX_UNKNOWN;
 	}
 	ethtool_cmd_speed_set(cmd, speed);
 
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
 
 void ehea_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+	netdev->ethtool_ops = &ehea_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 538903bf13bc..a0b418e007a0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -28,6 +28,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/device.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
@@ -3273,7 +3274,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
 		return -EINVAL;
 	}
 
-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
 		ret = -ENOMEM;
 		dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3359,7 +3360,6 @@ out_kill_eq:
 
 out_free_ad:
 	list_del(&adapter->list);
-	kfree(adapter);
 
 out:
 	ehea_update_firmware_handles();
@@ -3386,7 +3386,6 @@ static int ehea_remove(struct platform_device *dev)
 	ehea_destroy_eq(adapter->neq);
 	ehea_remove_adapter_mr(adapter);
 	list_del(&adapter->list);
-	kfree(adapter);
 
 	ehea_update_firmware_handles();
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 9b03033bb557..a0820f72b25c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -103,12 +103,14 @@ out_nomem:
 
 static void hw_queue_dtor(struct hw_queue *queue)
 {
-	int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	int pages_per_kpage;
 	int i, nr_pages;
 
 	if (!queue || !queue->queue_pages)
 		return;
 
+	pages_per_kpage = PAGE_SIZE / queue->pagesize;
+
 	nr_pages = queue->queue_length / queue->pagesize;
 
 	for (i = 0; i < nr_pages; i += pages_per_kpage)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ae342fdb42c8..87bd953cc2ee 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
 		dev->commac.ops = &emac_commac_sg_ops;
 	} else
 		ndev->netdev_ops = &emac_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+	ndev->ethtool_ops = &emac_ethtool_ops;
 
 	netif_carrier_off(ndev);
 
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 25045ae07171..5727779a7df2 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 */
 	dev->netdev_ops = &ipg_netdev_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
-	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+	dev->ethtool_ops = &ipg_ethtool_ops;
 
 	rc = pci_request_regions(pdev, DRV_NAME);
 	if (rc)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index b56461ce674c..9d979d7debef 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netdev->hw_features |= NETIF_F_RXALL;
 
 	netdev->netdev_ops = &e100_netdev_ops;
-	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+	netdev->ethtool_ops = &e100_ethtool_ops;
 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
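The SET_ETHTOOL_OPS() conversions in the ehea, emac, ipg and e100 hunks above are mechanical: the macro was only a thin wrapper around a pointer assignment, roughly as sketched below (a reconstruction, not a verbatim quote of the era's <linux/netdevice.h>):

#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

Open-coding the assignment in every driver is what allows the macro itself to be dropped from the header once no callers remain.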
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 73a8aeefb92a..d50f78afb56d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -168,8 +168,8 @@ static int e1000_get_settings(struct net_device *netdev,
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
@@ -1460,7 +1460,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			 * enough time to complete the receives, if it's
 			 * exceeded, break and error off
 			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		} while (good_cnt < 64 && time_after(time + 20, jiffies));
+
 		if (good_cnt != 64) {
 			ret_val = 13; /* ret_val is the same as mis-compare */
 			break;
@@ -1905,5 +1906,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+	netdev->ethtool_ops = &e1000_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c1d3fdb296a0..e9b07ccc0eba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4877,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
 	 * since the test for a multicast frame will test positive on
 	 * a broadcast frame.
 	 */
-	if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+	if (is_broadcast_ether_addr(mac_addr))
 		/* Broadcast packet */
 		stats->bprc++;
-	else if (*mac_addr & 0x01)
+	else if (is_multicast_ether_addr(mac_addr))
 		/* Multicast packet */
 		stats->mprc++;
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 27058dfe418b..660971f304b2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3105,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	 */
 	tx_ring = adapter->tx_ring;
 
-	if (unlikely(skb->len <= 0)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
 	 * packets may get corrupted during padding by HW.
 	 * To WA this issue, pad all small packets manually.
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a5f6b11d6992..08f22f348800 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1365,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
 	.setup_led = e1000e_setup_led_generic,
 	.config_collision_dist = e1000e_config_collision_dist_generic,
 	.rar_set = e1000e_rar_set_generic,
+	.rar_get_count = e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index e0aa7f1efb08..218481e509f9 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1896,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
 	.config_collision_dist = e1000e_config_collision_dist_generic,
 	.read_mac_addr = e1000_read_mac_addr_82571,
 	.rar_set = e1000e_rar_set_generic,
+	.rar_get_count = e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c5464a89..7785240a0da1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
 	u32 tx_hwtstamp_timeouts;
 
 	/* Rx */
-	bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
-			  int work_to_do) ____cacheline_aligned_in_smp;
-	void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
-			      gfp_t gfp);
+	bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+			 int work_to_do) ____cacheline_aligned_in_smp;
+	void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+			     gfp_t gfp);
 	struct e1000_ring *rx_ring;
 
 	u32 rx_int_delay;
@@ -391,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
 */
 #define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS 50
+#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
 
 /* hardware capability, feature, and workaround flags */
 #define FLAG_HAS_AMT (1 << 0)
@@ -573,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
 
 #define er32(reg) __er32(hw, E1000_##reg)
 
-/**
- * __ew32_prepare - prepare to write to MAC CSR register on certain parts
- * @hw: pointer to the HW structure
- *
- * When updating the MAC CSR registers, the Manageability Engine (ME) could
- * be accessing the registers at the same time. Normally, this is handled in
- * h/w by an arbiter but on some parts there is a bug that acknowledges Host
- * accesses later than it should which could result in the register to have
- * an incorrect value. Workaround this by checking the FWSM register which
- * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
- * and try again a number of times.
- **/
-static inline s32 __ew32_prepare(struct e1000_hw *hw)
-{
-	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
-
-	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
-		udelay(50);
-
-	return i;
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
-	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-		__ew32_prepare(hw);
-
-	writel(val, hw->hw_addr + reg);
-}
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
 
 #define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250bc1b99..815e26c6d34b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -159,8 +159,8 @@ static int e1000_get_settings(struct net_device *netdev,
 		ecmd->transceiver = XCVR_EXTERNAL;
 	}
 
-	speed = -1;
-	ecmd->duplex = -1;
+	speed = SPEED_UNKNOWN;
+	ecmd->duplex = DUPLEX_UNKNOWN;
 
 	if (netif_running(netdev)) {
 		if (netif_carrier_ok(netdev)) {
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
 		}
 	} else if (!pm_runtime_suspended(netdev->dev.parent)) {
 		u32 status = er32(STATUS);
+
 		if (status & E1000_STATUS_LU) {
 			if (status & E1000_STATUS_SPEED_1000)
 				speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
 			      reg + (offset << 2), val,
 			      (test[pat] & write & mask));
 			*data = reg;
-			return 1;
+			return true;
 		}
 	}
-	return 0;
+	return false;
 }
 
 static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
 			      int reg, u32 mask, u32 write)
 {
 	u32 val;
+
 	__ew32(&adapter->hw, reg, write & mask);
 	val = __er32(&adapter->hw, reg);
 	if ((write & mask) != (val & mask)) {
 		e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
 		      reg, (val & mask), (write & mask));
 		*data = reg;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 	*data = 0;
 	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
+
 		hw->mac.serdes_has_link = false;
 
 		/* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+	netdev->ethtool_ops = &e1000_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 6b3de5f39a97..72f5475c4b90 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -469,8 +469,9 @@ struct e1000_mac_operations {
 	s32 (*setup_led)(struct e1000_hw *);
 	void (*write_vfta)(struct e1000_hw *, u32, u32);
 	void (*config_collision_dist)(struct e1000_hw *);
-	void (*rar_set)(struct e1000_hw *, u8 *, u32);
+	int (*rar_set)(struct e1000_hw *, u8 *, u32);
 	s32 (*read_mac_addr)(struct e1000_hw *);
+	u32 (*rar_get_count)(struct e1000_hw *);
 };
 
 /* When to use various PHY register access functions:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd4246d71..8894ab8ed6bd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -139,8 +139,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
@@ -704,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
 		mac->ops.setup_physical_interface =
 		    e1000_setup_copper_link_pch_lpt;
+		mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
 	}
 
 	/* Enable PCS Lock-loss workaround for ICH8 */
@@ -1334,6 +1336,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	if (((hw->mac.type == e1000_pch2lan) ||
 	     (hw->mac.type == e1000_pch_lpt)) && link) {
 		u32 reg;
+
 		reg = er32(STATUS);
 		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
 			u16 emi_addr;
@@ -1634,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
 	u32 fwsm;
 
 	fwsm = er32(FWSM);
-	return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
-		((fwsm & E1000_FWSM_MODE_MASK) ==
-		 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       ((fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
 }
 
 /**
@@ -1667,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 * Use SHRA[0-3] in place of those reserved for ME.
 **/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 
@@ -1689,7 +1692,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 		e1e_flush();
 		ew32(RAH(index), rar_high);
 		e1e_flush();
-		return;
+		return 0;
 	}
 
 	/* RAR[1-6] are owned by manageability. Skip those and program the
@@ -1712,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 	/* verify the register updates */
 	if ((er32(SHRAL(index - 1)) == rar_low) &&
 	    (er32(SHRAH(index - 1)) == rar_high))
-		return;
+		return 0;
 
 	e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
 	      (index - 1), er32(FWSM));
@@ -1720,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 
 out:
 	e_dbg("Failed to write receive address at index %d\n", index);
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ *  @hw: pointer to the HW structure
+ *
+ *  Get the number of available receive registers that the Host can
+ *  program. SHRA[0-10] are the shared receive address registers
+ *  that are shared between the Host and manageability engine (ME).
+ *  ME can reserve any number of addresses and the host needs to be
+ *  able to tell how many available registers it has access to.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+	u32 wlock_mac;
+	u32 num_entries;
+
+	wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+	wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+	switch (wlock_mac) {
+	case 0:
+		/* All SHRA[0..10] and RAR[0] available */
+		num_entries = hw->mac.rar_entry_count;
+		break;
+	case 1:
+		/* Only RAR[0] available */
+		num_entries = 1;
+		break;
+	default:
+		/* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+		num_entries = wlock_mac + 1;
+		break;
+	}
+
+	return num_entries;
 }
 
 /**
@@ -1733,7 +1773,7 @@ out:
 * contain the MAC address. SHRA[0-10] are the shared receive address
 * registers that are shared between the Host and manageability engine (ME).
 **/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 	u32 wlock_mac;
@@ -1755,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
 		e1e_flush();
 		ew32(RAH(index), rar_high);
 		e1e_flush();
-		return;
+		return 0;
 	}
 
 	/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1787,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
 			/* verify the register updates */
 			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
 			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
-				return;
+				return 0;
 		}
 	}
 
 out:
 	e_dbg("Failed to write receive address at index %d\n", index);
+	return -E1000_ERR_CONFIG;
 }
 
 /**
@@ -4976,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
 	/* id_led_init dependent on mac type */
 	.config_collision_dist = e1000e_config_collision_dist_generic,
 	.rar_set = e1000e_rar_set_generic,
+	.rar_get_count = e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations ich8_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index baa0a466d1d0..8c386f3a15eb 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -211,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
 	return 0;
 }
 
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+	return hw->mac.rar_entry_count;
+}
+
 /**
 *  e1000e_rar_set_generic - Set receive address register
 *  @hw: pointer to the HW structure
@@ -220,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 
@@ -244,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
 	e1e_flush();
 	ew32(RAH(index), rar_high);
 	e1e_flush();
+
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 4e81c2825b7a..0513d90cdeea 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -61,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
 void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
 
 void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
 void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
 
 #endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386add04..201cc93f3625 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -124,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
124}; 124};
125 125
126/** 126/**
127 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
128 * @hw: pointer to the HW structure
129 *
130 * When updating the MAC CSR registers, the Manageability Engine (ME) could
131 * be accessing the registers at the same time. Normally, this is handled in
132 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
133 * accesses later than it should which could result in the register to have
134 * an incorrect value. Workaround this by checking the FWSM register which
135 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
136 * and try again a number of times.
137 **/
138s32 __ew32_prepare(struct e1000_hw *hw)
139{
140 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
141
142 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
143 udelay(50);
144
145 return i;
146}
147
148void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
149{
150 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
151 __ew32_prepare(hw);
152
153 writel(val, hw->hw_addr + reg);
154}
155
156/**
127 * e1000_regdump - register printout routine 157 * e1000_regdump - register printout routine
128 * @hw: pointer to the HW structure 158 * @hw: pointer to the HW structure
129 * @reginfo: pointer to the register info table 159 * @reginfo: pointer to the register info table
@@ -599,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
599 629
600 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { 630 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
601 u32 rctl = er32(RCTL); 631 u32 rctl = er32(RCTL);
632
602 ew32(RCTL, rctl & ~E1000_RCTL_EN); 633 ew32(RCTL, rctl & ~E1000_RCTL_EN);
603 e_err("ME firmware caused invalid RDT - resetting\n"); 634 e_err("ME firmware caused invalid RDT - resetting\n");
604 schedule_work(&adapter->reset_task); 635 schedule_work(&adapter->reset_task);
@@ -615,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
615 646
616 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { 647 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
617 u32 tctl = er32(TCTL); 648 u32 tctl = er32(TCTL);
649
618 ew32(TCTL, tctl & ~E1000_TCTL_EN); 650 ew32(TCTL, tctl & ~E1000_TCTL_EN);
619 e_err("ME firmware caused invalid TDT - resetting\n"); 651 e_err("ME firmware caused invalid TDT - resetting\n");
620 schedule_work(&adapter->reset_task); 652 schedule_work(&adapter->reset_task);
@@ -1198,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1198 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 1230 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1199 (count < tx_ring->count)) { 1231 (count < tx_ring->count)) {
1200 bool cleaned = false; 1232 bool cleaned = false;
1233
1201 rmb(); /* read buffer_info after eop_desc */ 1234 rmb(); /* read buffer_info after eop_desc */
1202 for (; !cleaned; count++) { 1235 for (; !cleaned; count++) {
1203 tx_desc = E1000_TX_DESC(*tx_ring, i); 1236 tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
1753 adapter->flags & FLAG_RX_NEEDS_RESTART) { 1786 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1754 /* disable receives */ 1787 /* disable receives */
1755 u32 rctl = er32(RCTL); 1788 u32 rctl = er32(RCTL);
1789
1756 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1790 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1757 adapter->flags |= FLAG_RESTART_NOW; 1791 adapter->flags |= FLAG_RESTART_NOW;
1758 } 1792 }
@@ -1960,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
1960 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ 1994 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1961 if (hw->mac.type == e1000_82574) { 1995 if (hw->mac.type == e1000_82574) {
1962 u32 rfctl = er32(RFCTL); 1996 u32 rfctl = er32(RFCTL);
1997
1963 rfctl |= E1000_RFCTL_ACK_DIS; 1998 rfctl |= E1000_RFCTL_ACK_DIS;
1964 ew32(RFCTL, rfctl); 1999 ew32(RFCTL, rfctl);
1965 } 2000 }
@@ -2204,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
2204 2239
2205 if (adapter->msix_entries) { 2240 if (adapter->msix_entries) {
2206 int i; 2241 int i;
2242
2207 for (i = 0; i < adapter->num_vectors; i++) 2243 for (i = 0; i < adapter->num_vectors; i++)
2208 synchronize_irq(adapter->msix_entries[i].vector); 2244 synchronize_irq(adapter->msix_entries[i].vector);
2209 } else { 2245 } else {
@@ -2921,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2921 2957
2922 if (adapter->flags2 & FLAG2_DMA_BURST) { 2958 if (adapter->flags2 & FLAG2_DMA_BURST) {
2923 u32 txdctl = er32(TXDCTL(0)); 2959 u32 txdctl = er32(TXDCTL(0));
2960
2924 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2961 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2925 E1000_TXDCTL_WTHRESH); 2962 E1000_TXDCTL_WTHRESH);
2926 /* set up some performance related parameters to encourage the 2963 /* set up some performance related parameters to encourage the
@@ -3239,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3239 3276
3240 if (adapter->flags & FLAG_IS_ICH) { 3277 if (adapter->flags & FLAG_IS_ICH) {
3241 u32 rxdctl = er32(RXDCTL(0)); 3278 u32 rxdctl = er32(RXDCTL(0));
3279
3242 ew32(RXDCTL(0), rxdctl | 0x3); 3280 ew32(RXDCTL(0), rxdctl | 0x3);
3243 } 3281 }
3244 3282
@@ -3303,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3303{ 3341{
3304 struct e1000_adapter *adapter = netdev_priv(netdev); 3342 struct e1000_adapter *adapter = netdev_priv(netdev);
3305 struct e1000_hw *hw = &adapter->hw; 3343 struct e1000_hw *hw = &adapter->hw;
3306 unsigned int rar_entries = hw->mac.rar_entry_count; 3344 unsigned int rar_entries;
3307 int count = 0; 3345 int count = 0;
3308 3346
3347 rar_entries = hw->mac.ops.rar_get_count(hw);
3348
3309 /* save a rar entry for our hardware address */ 3349 /* save a rar entry for our hardware address */
3310 rar_entries--; 3350 rar_entries--;
3311 3351
@@ -3324,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3324 * combining 3364 * combining
3325 */ 3365 */
3326 netdev_for_each_uc_addr(ha, netdev) { 3366 netdev_for_each_uc_addr(ha, netdev) {
3367 int rval;
3368
3327 if (!rar_entries) 3369 if (!rar_entries)
3328 break; 3370 break;
3329 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); 3371 rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3372 if (rval < 0)
3373 return -ENOMEM;
3330 count++; 3374 count++;
3331 } 3375 }
3332 } 3376 }
@@ -4085,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4085 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4129 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4086 cc); 4130 cc);
4087 struct e1000_hw *hw = &adapter->hw; 4131 struct e1000_hw *hw = &adapter->hw;
4088 cycle_t systim; 4132 cycle_t systim, systim_next;
4089 4133
4090 /* latch SYSTIMH on read of SYSTIML */ 4134 /* latch SYSTIMH on read of SYSTIML */
4091 systim = (cycle_t)er32(SYSTIML); 4135 systim = (cycle_t)er32(SYSTIML);
4092 systim |= (cycle_t)er32(SYSTIMH) << 32; 4136 systim |= (cycle_t)er32(SYSTIMH) << 32;
4093 4137
4138 if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
4139 u64 incvalue, time_delta, rem, temp;
4140 int i;
4141
4142 /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
4143 * check to see that the time is incrementing at a reasonable
4144 * rate and is a multiple of incvalue
4145 */
4146 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4147 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4148 /* latch SYSTIMH on read of SYSTIML */
4149 systim_next = (cycle_t)er32(SYSTIML);
4150 systim_next |= (cycle_t)er32(SYSTIMH) << 32;
4151
4152 time_delta = systim_next - systim;
4153 temp = time_delta;
4154 rem = do_div(temp, incvalue);
4155
4156 systim = systim_next;
4157
4158 if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
4159 (rem == 0))
4160 break;
4161 }
4162 }
4094 return systim; 4163 return systim;
4095} 4164}
4096 4165
@@ -4491,7 +4560,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
4491 e1000_get_phy_info(hw); 4560 e1000_get_phy_info(hw);
4492 4561
4493 /* Enable EEE on 82579 after link up */ 4562 /* Enable EEE on 82579 after link up */
4494 if (hw->phy.type == e1000_phy_82579) 4563 if (hw->phy.type >= e1000_phy_82579)
4495 e1000_set_eee_pchlan(hw); 4564 e1000_set_eee_pchlan(hw);
4496} 4565}
4497 4566
@@ -4695,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4695 /* Correctable ECC Errors */ 4764 /* Correctable ECC Errors */
4696 if (hw->mac.type == e1000_pch_lpt) { 4765 if (hw->mac.type == e1000_pch_lpt) {
4697 u32 pbeccsts = er32(PBECCSTS); 4766 u32 pbeccsts = er32(PBECCSTS);
4767
4698 adapter->corr_errors += 4768 adapter->corr_errors +=
4699 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 4769 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4700 adapter->uncorr_errors += 4770 adapter->uncorr_errors +=
@@ -4808,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
4808 (adapter->flags & FLAG_RESTART_NOW)) { 4878 (adapter->flags & FLAG_RESTART_NOW)) {
4809 struct e1000_hw *hw = &adapter->hw; 4879 struct e1000_hw *hw = &adapter->hw;
4810 u32 rctl = er32(RCTL); 4880 u32 rctl = er32(RCTL);
4881
4811 ew32(RCTL, rctl | E1000_RCTL_EN); 4882 ew32(RCTL, rctl | E1000_RCTL_EN);
4812 adapter->flags &= ~FLAG_RESTART_NOW; 4883 adapter->flags &= ~FLAG_RESTART_NOW;
4813 } 4884 }
@@ -4930,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4930 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 5001 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4931 !txb2b) { 5002 !txb2b) {
4932 u32 tarc0; 5003 u32 tarc0;
5004
4933 tarc0 = er32(TARC(0)); 5005 tarc0 = er32(TARC(0));
4934 tarc0 &= ~SPEED_MODE_BIT; 5006 tarc0 &= ~SPEED_MODE_BIT;
4935 ew32(TARC(0), tarc0); 5007 ew32(TARC(0), tarc0);
@@ -5170,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5170 __be16 protocol; 5242 __be16 protocol;
5171 5243
5172 if (skb->ip_summed != CHECKSUM_PARTIAL) 5244 if (skb->ip_summed != CHECKSUM_PARTIAL)
5173 return 0; 5245 return false;
5174 5246
5175 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 5247 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5176 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 5248 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5215 i = 0; 5287 i = 0;
5216 tx_ring->next_to_use = i; 5288 tx_ring->next_to_use = i;
5217 5289
5218 return 1; 5290 return true;
5219} 5291}
5220 5292
5221static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 5293static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6209 e1e_wphy(&adapter->hw, BM_WUS, ~0); 6281 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6210 } else { 6282 } else {
6211 u32 wus = er32(WUS); 6283 u32 wus = er32(WUS);
6284
6212 if (wus) { 6285 if (wus) {
6213 e_info("MAC Wakeup cause - %s\n", 6286 e_info("MAC Wakeup cause - %s\n",
6214 wus & E1000_WUS_EX ? "Unicast Packet" : 6287 wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
7027 .resume = e1000_io_resume, 7100 .resume = e1000_io_resume,
7028}; 7101};
7029 7102
7030static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { 7103static const struct pci_device_id e1000_pci_tbl[] = {
7031 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 7104 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7032 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 7105 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7033 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 7106 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7217,7 @@ static struct pci_driver e1000_driver = {
7144static int __init e1000_init_module(void) 7217static int __init e1000_init_module(void)
7145{ 7218{
7146 int ret; 7219 int ret;
7220
7147 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 7221 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7148 e1000e_driver_version); 7222 e1000e_driver_version);
7149 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); 7223 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
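The last hunk above replaces DEFINE_PCI_DEVICE_TABLE() with an open-coded const struct pci_device_id array; the macro added nothing beyond the const qualifier and was being phased out of the tree. A minimal standalone sketch of the sentinel-terminated table shape it converts to (the struct, vendor/device values, and loop are illustrative stand-ins, not the kernel's pci_device_id):

#include <stdio.h>

/* Stand-in for struct pci_device_id: just enough fields to show the idiom. */
struct pci_id {
	unsigned int vendor, device;
	unsigned long driver_data;
};

/* Sentinel-terminated ID table, the shape e1000_pci_tbl takes above. */
static const struct pci_id ids[] = {
	{ 0x8086, 0x10d3, 0 },	/* 0x8086 is Intel; device ID is a placeholder */
	{ 0, 0, 0 }		/* all-zero terminator ends the scan */
};

int main(void)
{
	const struct pci_id *p;

	for (p = ids; p->vendor; p++)
		printf("would match %04x:%04x\n", p->vendor, p->device);
	return 0;
}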
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f04bff..b1f212b7baf7 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
398 /* Loop to allow for up to whole page write of eeprom */ 398 /* Loop to allow for up to whole page write of eeprom */
399 while (widx < words) { 399 while (widx < words) {
400 u16 word_out = data[widx]; 400 u16 word_out = data[widx];
401
401 word_out = (word_out >> 8) | (word_out << 8); 402 word_out = (word_out >> 8) | (word_out << 8);
402 e1000_shift_out_eec_bits(hw, word_out, 16); 403 e1000_shift_out_eec_bits(hw, word_out, 16);
403 widx++; 404 widx++;
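The only functional line in the nvm.c hunk byte-swaps each 16-bit word before it is clocked out, since the SPI EEPROM expects the opposite byte order from the in-memory representation; the added blank line is purely style. A standalone sketch of the same swap:

#include <stdint.h>
#include <stdio.h>

/* Swap the two bytes of a 16-bit word, as e1000e_write_nvm_spi() does
 * with word_out before shifting it out to the EEPROM. */
static uint16_t swap16(uint16_t w)
{
	return (uint16_t)((w >> 8) | (w << 8));
}

int main(void)
{
	printf("0x%04x -> 0x%04x\n", 0x1234, swap16(0x1234));	/* 0x3412 */
	return 0;
}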
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3249c8..aa1923f7ebdd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
436 436
437 if (num_IntMode > bd) { 437 if (num_IntMode > bd) {
438 unsigned int int_mode = IntMode[bd]; 438 unsigned int int_mode = IntMode[bd];
439
439 e1000_validate_option(&int_mode, &opt, adapter); 440 e1000_validate_option(&int_mode, &opt, adapter);
440 adapter->int_mode = int_mode; 441 adapter->int_mode = int_mode;
441 } else { 442 } else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
457 458
458 if (num_SmartPowerDownEnable > bd) { 459 if (num_SmartPowerDownEnable > bd) {
459 unsigned int spd = SmartPowerDownEnable[bd]; 460 unsigned int spd = SmartPowerDownEnable[bd];
461
460 e1000_validate_option(&spd, &opt, adapter); 462 e1000_validate_option(&spd, &opt, adapter);
461 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) 463 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
462 adapter->flags |= FLAG_SMART_POWER_DOWN; 464 adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
473 475
474 if (num_CrcStripping > bd) { 476 if (num_CrcStripping > bd) {
475 unsigned int crc_stripping = CrcStripping[bd]; 477 unsigned int crc_stripping = CrcStripping[bd];
478
476 e1000_validate_option(&crc_stripping, &opt, adapter); 479 e1000_validate_option(&crc_stripping, &opt, adapter);
477 if (crc_stripping == OPTION_ENABLED) { 480 if (crc_stripping == OPTION_ENABLED) {
478 adapter->flags2 |= FLAG2_CRC_STRIPPING; 481 adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
495 498
496 if (num_KumeranLockLoss > bd) { 499 if (num_KumeranLockLoss > bd) {
497 unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; 500 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
501
498 e1000_validate_option(&kmrn_lock_loss, &opt, adapter); 502 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
499 enabled = kmrn_lock_loss; 503 enabled = kmrn_lock_loss;
500 } 504 }
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc98bf30..b2005e13fb01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2896 (hw->phy.addr == 2) && 2896 (hw->phy.addr == 2) &&
2897 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { 2897 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
2898 u16 data2 = 0x7EFF; 2898 u16 data2 = 0x7EFF;
2899
2899 ret_val = e1000_access_phy_debug_regs_hv(hw, 2900 ret_val = e1000_access_phy_debug_regs_hv(hw,
2900 (1 << 6) | 0x3, 2901 (1 << 6) | 0x3,
2901 &data2, false); 2902 &data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b4393a6c..65985846345d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -72,6 +72,7 @@
72#define I40E_MIN_NUM_DESCRIPTORS 64 72#define I40E_MIN_NUM_DESCRIPTORS 64
73#define I40E_MIN_MSIX 2 73#define I40E_MIN_MSIX 2
74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ 74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
75#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
75#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ 76#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
76#define I40E_DEFAULT_QUEUES_PER_VF 4 77#define I40E_DEFAULT_QUEUES_PER_VF 4
77#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ 78#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
@@ -97,10 +98,6 @@
97#define STRINGIFY(foo) #foo 98#define STRINGIFY(foo) #foo
98#define XSTRINGIFY(bar) STRINGIFY(bar) 99#define XSTRINGIFY(bar) STRINGIFY(bar)
99 100
100#ifndef ARCH_HAS_PREFETCH
101#define prefetch(X)
102#endif
103
104#define I40E_RX_DESC(R, i) \ 101#define I40E_RX_DESC(R, i) \
105 ((ring_is_16byte_desc_enabled(R)) \ 102 ((ring_is_16byte_desc_enabled(R)) \
106 ? (union i40e_32byte_rx_desc *) \ 103 ? (union i40e_32byte_rx_desc *) \
@@ -157,11 +154,23 @@ struct i40e_lump_tracking {
157#define I40E_FDIR_BUFFER_FULL_MARGIN 10 154#define I40E_FDIR_BUFFER_FULL_MARGIN 10
158#define I40E_FDIR_BUFFER_HEAD_ROOM 200 155#define I40E_FDIR_BUFFER_HEAD_ROOM 200
159 156
157enum i40e_fd_stat_idx {
158 I40E_FD_STAT_ATR,
159 I40E_FD_STAT_SB,
160 I40E_FD_STAT_PF_COUNT
161};
162#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
163#define I40E_FD_ATR_STAT_IDX(pf_id) \
164 (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
165#define I40E_FD_SB_STAT_IDX(pf_id) \
166 (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
167
160struct i40e_fdir_filter { 168struct i40e_fdir_filter {
161 struct hlist_node fdir_node; 169 struct hlist_node fdir_node;
162 /* filter input set */ 170 /* filter input set */
163 u8 flow_type; 171 u8 flow_type;
164 u8 ip4_proto; 172 u8 ip4_proto;
173 /* TX packet view of src and dst */
165 __be32 dst_ip[4]; 174 __be32 dst_ip[4];
166 __be32 src_ip[4]; 175 __be32 src_ip[4];
167 __be16 src_port; 176 __be16 src_port;
@@ -205,7 +214,6 @@ struct i40e_pf {
205 unsigned long state; 214 unsigned long state;
206 unsigned long link_check_timeout; 215 unsigned long link_check_timeout;
207 struct msix_entry *msix_entries; 216 struct msix_entry *msix_entries;
208 u16 num_msix_entries;
209 bool fc_autoneg_status; 217 bool fc_autoneg_status;
210 218
211 u16 eeprom_version; 219 u16 eeprom_version;
@@ -220,11 +228,14 @@ struct i40e_pf {
220 u16 rss_size; /* num queues in the RSS array */ 228 u16 rss_size; /* num queues in the RSS array */
221 u16 rss_size_max; /* HW defined max RSS queues */ 229 u16 rss_size_max; /* HW defined max RSS queues */
222 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ 230 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
231 u16 num_alloc_vsi; /* num VSIs this driver supports */
223 u8 atr_sample_rate; 232 u8 atr_sample_rate;
224 bool wol_en; 233 bool wol_en;
225 234
226 struct hlist_head fdir_filter_list; 235 struct hlist_head fdir_filter_list;
227 u16 fdir_pf_active_filters; 236 u16 fdir_pf_active_filters;
237 u16 fd_sb_cnt_idx;
238 u16 fd_atr_cnt_idx;
228 239
229#ifdef CONFIG_I40E_VXLAN 240#ifdef CONFIG_I40E_VXLAN
230 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; 241 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -266,6 +277,7 @@ struct i40e_pf {
266#ifdef CONFIG_I40E_VXLAN 277#ifdef CONFIG_I40E_VXLAN
267#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) 278#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
268#endif 279#endif
280#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
269 281
270 /* tracks features that get auto disabled by errors */ 282 /* tracks features that get auto disabled by errors */
271 u64 auto_disable_flags; 283 u64 auto_disable_flags;
@@ -300,7 +312,6 @@ struct i40e_pf {
300 u16 pf_seid; 312 u16 pf_seid;
301 u16 main_vsi_seid; 313 u16 main_vsi_seid;
302 u16 mac_seid; 314 u16 mac_seid;
303 struct i40e_aqc_get_switch_config_data *sw_config;
304 struct kobject *switch_kobj; 315 struct kobject *switch_kobj;
305#ifdef CONFIG_DEBUG_FS 316#ifdef CONFIG_DEBUG_FS
306 struct dentry *i40e_dbg_pf; 317 struct dentry *i40e_dbg_pf;
@@ -329,9 +340,7 @@ struct i40e_pf {
329 struct ptp_clock *ptp_clock; 340 struct ptp_clock *ptp_clock;
330 struct ptp_clock_info ptp_caps; 341 struct ptp_clock_info ptp_caps;
331 struct sk_buff *ptp_tx_skb; 342 struct sk_buff *ptp_tx_skb;
332 struct work_struct ptp_tx_work;
333 struct hwtstamp_config tstamp_config; 343 struct hwtstamp_config tstamp_config;
334 unsigned long ptp_tx_start;
335 unsigned long last_rx_ptp_check; 344 unsigned long last_rx_ptp_check;
336 spinlock_t tmreg_lock; /* Used to protect the device time registers. */ 345 spinlock_t tmreg_lock; /* Used to protect the device time registers. */
337 u64 ptp_base_adj; 346 u64 ptp_base_adj;
@@ -420,6 +429,7 @@ struct i40e_vsi {
420 struct i40e_q_vector **q_vectors; 429 struct i40e_q_vector **q_vectors;
421 int num_q_vectors; 430 int num_q_vectors;
422 int base_vector; 431 int base_vector;
432 bool irqs_ready;
423 433
424 u16 seid; /* HW index of this VSI (absolute index) */ 434 u16 seid; /* HW index of this VSI (absolute index) */
425 u16 id; /* VSI number */ 435 u16 id; /* VSI number */
@@ -540,6 +550,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
540 (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT); 550 (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
541} 551}
542 552
553/**
554 * i40e_get_fd_cnt_all - get the total FD filter space available
555 * @pf: pointer to the pf struct
556 **/
557static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
558{
559 return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
560}
561
543/* needed by i40e_ethtool.c */ 562/* needed by i40e_ethtool.c */
544int i40e_up(struct i40e_vsi *vsi); 563int i40e_up(struct i40e_vsi *vsi);
545void i40e_down(struct i40e_vsi *vsi); 564void i40e_down(struct i40e_vsi *vsi);
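The new i40e_fd_stat_idx enum and the I40E_FD_*_STAT_IDX macros in this header carve the flow-director counter space into a two-slot block per PF: ATR at pf_id * I40E_FD_STAT_PF_COUNT and the sideband counter in the next slot, which is what the new pf->fd_atr_cnt_idx and pf->fd_sb_cnt_idx fields will hold. A standalone sketch of the index math (names re-declared locally, mirroring the macros above):

#include <stdio.h>

enum fd_stat_idx { FD_STAT_ATR, FD_STAT_SB, FD_STAT_PF_COUNT };

#define FD_STAT_PF_IDX(pf_id)	((pf_id) * FD_STAT_PF_COUNT)
#define FD_ATR_STAT_IDX(pf_id)	(FD_STAT_PF_IDX(pf_id) + FD_STAT_ATR)
#define FD_SB_STAT_IDX(pf_id)	(FD_STAT_PF_IDX(pf_id) + FD_STAT_SB)

int main(void)
{
	/* PF 3 owns counter slots 6 (ATR) and 7 (sideband). */
	printf("pf 3: atr=%d sb=%d\n", FD_ATR_STAT_IDX(3), FD_SB_STAT_IDX(3));
	return 0;
}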
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902bf249b..7a027499fc57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
33static void i40e_resume_aq(struct i40e_hw *hw); 33static void i40e_resume_aq(struct i40e_hw *hw);
34 34
35/** 35/**
36 * i40e_is_nvm_update_op - return true if this is an NVM update operation
37 * @desc: API request descriptor
38 **/
39static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
40{
41 return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
42 (desc->opcode == i40e_aqc_opc_nvm_update);
43}
44
45/**
36 * i40e_adminq_init_regs - Initialize AdminQ registers 46 * i40e_adminq_init_regs - Initialize AdminQ registers
37 * @hw: pointer to the hardware structure 47 * @hw: pointer to the hardware structure
38 * 48 *
@@ -281,8 +291,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
281 * 291 *
282 * Configure base address and length registers for the transmit queue 292 * Configure base address and length registers for the transmit queue
283 **/ 293 **/
284static void i40e_config_asq_regs(struct i40e_hw *hw) 294static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
285{ 295{
296 i40e_status ret_code = 0;
297 u32 reg = 0;
298
286 if (hw->mac.type == I40E_MAC_VF) { 299 if (hw->mac.type == I40E_MAC_VF) {
287 /* configure the transmit queue */ 300 /* configure the transmit queue */
288 wr32(hw, I40E_VF_ATQBAH1, 301 wr32(hw, I40E_VF_ATQBAH1,
@@ -291,6 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
291 lower_32_bits(hw->aq.asq.desc_buf.pa)); 304 lower_32_bits(hw->aq.asq.desc_buf.pa));
292 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries | 305 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
293 I40E_VF_ATQLEN1_ATQENABLE_MASK)); 306 I40E_VF_ATQLEN1_ATQENABLE_MASK));
307 reg = rd32(hw, I40E_VF_ATQBAL1);
294 } else { 308 } else {
295 /* configure the transmit queue */ 309 /* configure the transmit queue */
296 wr32(hw, I40E_PF_ATQBAH, 310 wr32(hw, I40E_PF_ATQBAH,
@@ -299,7 +313,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
299 lower_32_bits(hw->aq.asq.desc_buf.pa)); 313 lower_32_bits(hw->aq.asq.desc_buf.pa));
300 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries | 314 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
301 I40E_PF_ATQLEN_ATQENABLE_MASK)); 315 I40E_PF_ATQLEN_ATQENABLE_MASK));
316 reg = rd32(hw, I40E_PF_ATQBAL);
302 } 317 }
318
319 /* Check one register to verify that config was applied */
320 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
321 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
322
323 return ret_code;
303} 324}
304 325
305/** 326/**
@@ -308,8 +329,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
308 * 329 *
309 * Configure base address and length registers for the receive (event queue) 330 * Configure base address and length registers for the receive (event queue)
310 **/ 331 **/
311static void i40e_config_arq_regs(struct i40e_hw *hw) 332static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
312{ 333{
334 i40e_status ret_code = 0;
335 u32 reg = 0;
336
313 if (hw->mac.type == I40E_MAC_VF) { 337 if (hw->mac.type == I40E_MAC_VF) {
314 /* configure the receive queue */ 338 /* configure the receive queue */
315 wr32(hw, I40E_VF_ARQBAH1, 339 wr32(hw, I40E_VF_ARQBAH1,
@@ -318,6 +342,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
318 lower_32_bits(hw->aq.arq.desc_buf.pa)); 342 lower_32_bits(hw->aq.arq.desc_buf.pa));
319 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries | 343 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
320 I40E_VF_ARQLEN1_ARQENABLE_MASK)); 344 I40E_VF_ARQLEN1_ARQENABLE_MASK));
345 reg = rd32(hw, I40E_VF_ARQBAL1);
321 } else { 346 } else {
322 /* configure the receive queue */ 347 /* configure the receive queue */
323 wr32(hw, I40E_PF_ARQBAH, 348 wr32(hw, I40E_PF_ARQBAH,
@@ -326,10 +351,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
326 lower_32_bits(hw->aq.arq.desc_buf.pa)); 351 lower_32_bits(hw->aq.arq.desc_buf.pa));
327 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries | 352 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
328 I40E_PF_ARQLEN_ARQENABLE_MASK)); 353 I40E_PF_ARQLEN_ARQENABLE_MASK));
354 reg = rd32(hw, I40E_PF_ARQBAL);
329 } 355 }
330 356
331 /* Update tail in the HW to post pre-allocated buffers */ 357 /* Update tail in the HW to post pre-allocated buffers */
332 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 358 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
359
360 /* Check one register to verify that config was applied */
361 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
362 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
363
364 return ret_code;
333} 365}
334 366
335/** 367/**
@@ -377,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
377 goto init_adminq_free_rings; 409 goto init_adminq_free_rings;
378 410
379 /* initialize base registers */ 411 /* initialize base registers */
380 i40e_config_asq_regs(hw); 412 ret_code = i40e_config_asq_regs(hw);
413 if (ret_code)
414 goto init_adminq_free_rings;
381 415
382 /* success! */ 416 /* success! */
383 goto init_adminq_exit; 417 goto init_adminq_exit;
@@ -434,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
434 goto init_adminq_free_rings; 468 goto init_adminq_free_rings;
435 469
436 /* initialize base registers */ 470 /* initialize base registers */
437 i40e_config_arq_regs(hw); 471 ret_code = i40e_config_arq_regs(hw);
472 if (ret_code)
473 goto init_adminq_free_rings;
438 474
439 /* success! */ 475 /* success! */
440 goto init_adminq_exit; 476 goto init_adminq_exit;
@@ -577,14 +613,14 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
577 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); 613 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
578 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; 614 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
579 615
580 if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR || 616 if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
581 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
582 ret_code = I40E_ERR_FIRMWARE_API_VERSION; 617 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
583 goto init_adminq_free_arq; 618 goto init_adminq_free_arq;
584 } 619 }
585 620
586 /* pre-emptive resource lock release */ 621 /* pre-emptive resource lock release */
587 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); 622 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
623 hw->aq.nvm_busy = false;
588 624
589 ret_code = i40e_aq_set_hmc_resource_profile(hw, 625 ret_code = i40e_aq_set_hmc_resource_profile(hw,
590 I40E_HMC_PROFILE_DEFAULT, 626 I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
708 goto asq_send_command_exit; 744 goto asq_send_command_exit;
709 } 745 }
710 746
747 if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
748 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
749 status = I40E_ERR_NVM;
750 goto asq_send_command_exit;
751 }
752
711 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 753 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
712 if (cmd_details) { 754 if (cmd_details) {
713 *details = *cmd_details; 755 *details = *cmd_details;
@@ -835,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
835 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; 877 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
836 } 878 }
837 879
880 if (i40e_is_nvm_update_op(desc))
881 hw->aq.nvm_busy = true;
882
838 /* update the error if time out occurred */ 883 /* update the error if time out occurred */
839 if ((!cmd_completed) && 884 if ((!cmd_completed) &&
840 (!details->async && !details->postpone)) { 885 (!details->async && !details->postpone)) {
@@ -929,6 +974,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
929 e->msg_size); 974 e->msg_size);
930 } 975 }
931 976
977 if (i40e_is_nvm_update_op(&e->desc))
978 hw->aq.nvm_busy = false;
979
932 /* Restore the original datalen and buffer address in the desc, 980 /* Restore the original datalen and buffer address in the desc,
933 * FW updates datalen to indicate the event message 981 * FW updates datalen to indicate the event message
934 * size 982 * size
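Two hardening changes run through the adminq.c hunks: i40e_config_asq_regs()/i40e_config_arq_regs() now read one base-address register back and compare it with what was written, so a dead device fails queue init instead of limping on, and the new nvm_busy flag makes i40e_asq_send_command() reject a second NVM erase/update until i40e_clean_arq_element() sees the completion for the first. A minimal standalone sketch of the write-then-verify half, with wr32()/rd32() modeled as array accesses over a fake register file:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[16];	/* stand-in for the mapped device registers */

static void wr32(unsigned int reg, uint32_t val) { fake_regs[reg] = val; }
static uint32_t rd32(unsigned int reg) { return fake_regs[reg]; }

/* Program a queue base address, then read it back to confirm the write
 * reached the device; the driver returns I40E_ERR_ADMIN_QUEUE_ERROR on
 * mismatch, modeled here as -1. */
static int config_queue_base(unsigned int reg, uint64_t dma_addr)
{
	wr32(reg, (uint32_t)dma_addr);	/* lower 32 bits, as in the hunk */
	if (rd32(reg) != (uint32_t)dma_addr)
		return -1;
	return 0;
}

int main(void)
{
	printf("config %s\n", config_queue_base(2, 0x12345000ULL) ? "failed" : "ok");
	return 0;
}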
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f7685a911..b1552fbc48a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
90 u16 fw_min_ver; /* firmware minor version */ 90 u16 fw_min_ver; /* firmware minor version */
91 u16 api_maj_ver; /* api major version */ 91 u16 api_maj_ver; /* api major version */
92 u16 api_min_ver; /* api minor version */ 92 u16 api_min_ver; /* api minor version */
93 bool nvm_busy;
93 94
94 struct mutex asq_mutex; /* Send queue lock */ 95 struct mutex asq_mutex; /* Send queue lock */
95 struct mutex arq_mutex; /* Receive queue lock */ 96 struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a8f8da..15f289f2917f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
34 */ 34 */
35 35
36#define I40E_FW_API_VERSION_MAJOR 0x0001 36#define I40E_FW_API_VERSION_MAJOR 0x0001
37#define I40E_FW_API_VERSION_MINOR 0x0001 37#define I40E_FW_API_VERSION_MINOR 0x0002
38 38
39struct i40e_aq_desc { 39struct i40e_aq_desc {
40 __le16 flags; 40 __le16 flags;
@@ -123,6 +123,7 @@ enum i40e_admin_queue_opc {
123 i40e_aqc_opc_get_version = 0x0001, 123 i40e_aqc_opc_get_version = 0x0001,
124 i40e_aqc_opc_driver_version = 0x0002, 124 i40e_aqc_opc_driver_version = 0x0002,
125 i40e_aqc_opc_queue_shutdown = 0x0003, 125 i40e_aqc_opc_queue_shutdown = 0x0003,
126 i40e_aqc_opc_set_pf_context = 0x0004,
126 127
127 /* resource ownership */ 128 /* resource ownership */
128 i40e_aqc_opc_request_resource = 0x0008, 129 i40e_aqc_opc_request_resource = 0x0008,
@@ -182,9 +183,6 @@ enum i40e_admin_queue_opc {
182 i40e_aqc_opc_add_mirror_rule = 0x0260, 183 i40e_aqc_opc_add_mirror_rule = 0x0260,
183 i40e_aqc_opc_delete_mirror_rule = 0x0261, 184 i40e_aqc_opc_delete_mirror_rule = 0x0261,
184 185
185 i40e_aqc_opc_set_storm_control_config = 0x0280,
186 i40e_aqc_opc_get_storm_control_config = 0x0281,
187
188 /* DCB commands */ 186 /* DCB commands */
189 i40e_aqc_opc_dcb_ignore_pfc = 0x0301, 187 i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
190 i40e_aqc_opc_dcb_updated = 0x0302, 188 i40e_aqc_opc_dcb_updated = 0x0302,
@@ -207,6 +205,7 @@ enum i40e_admin_queue_opc {
207 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, 205 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
208 i40e_aqc_opc_suspend_port_tx = 0x041B, 206 i40e_aqc_opc_suspend_port_tx = 0x041B,
209 i40e_aqc_opc_resume_port_tx = 0x041C, 207 i40e_aqc_opc_resume_port_tx = 0x041C,
208 i40e_aqc_opc_configure_partition_bw = 0x041D,
210 209
211 /* hmc */ 210 /* hmc */
212 i40e_aqc_opc_query_hmc_resource_profile = 0x0500, 211 i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -224,13 +223,15 @@ enum i40e_admin_queue_opc {
224 i40e_aqc_opc_get_partner_advt = 0x0616, 223 i40e_aqc_opc_get_partner_advt = 0x0616,
225 i40e_aqc_opc_set_lb_modes = 0x0618, 224 i40e_aqc_opc_set_lb_modes = 0x0618,
226 i40e_aqc_opc_get_phy_wol_caps = 0x0621, 225 i40e_aqc_opc_get_phy_wol_caps = 0x0621,
227 i40e_aqc_opc_set_phy_reset = 0x0622, 226 i40e_aqc_opc_set_phy_debug = 0x0622,
228 i40e_aqc_opc_upload_ext_phy_fm = 0x0625, 227 i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
229 228
230 /* NVM commands */ 229 /* NVM commands */
231 i40e_aqc_opc_nvm_read = 0x0701, 230 i40e_aqc_opc_nvm_read = 0x0701,
232 i40e_aqc_opc_nvm_erase = 0x0702, 231 i40e_aqc_opc_nvm_erase = 0x0702,
233 i40e_aqc_opc_nvm_update = 0x0703, 232 i40e_aqc_opc_nvm_update = 0x0703,
233 i40e_aqc_opc_nvm_config_read = 0x0704,
234 i40e_aqc_opc_nvm_config_write = 0x0705,
234 235
235 /* virtualization commands */ 236 /* virtualization commands */
236 i40e_aqc_opc_send_msg_to_pf = 0x0801, 237 i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -272,8 +273,6 @@ enum i40e_admin_queue_opc {
272 i40e_aqc_opc_debug_set_mode = 0xFF01, 273 i40e_aqc_opc_debug_set_mode = 0xFF01,
273 i40e_aqc_opc_debug_read_reg = 0xFF03, 274 i40e_aqc_opc_debug_read_reg = 0xFF03,
274 i40e_aqc_opc_debug_write_reg = 0xFF04, 275 i40e_aqc_opc_debug_write_reg = 0xFF04,
275 i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
276 i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
277 i40e_aqc_opc_debug_modify_reg = 0xFF07, 276 i40e_aqc_opc_debug_modify_reg = 0xFF07,
278 i40e_aqc_opc_debug_dump_internals = 0xFF08, 277 i40e_aqc_opc_debug_dump_internals = 0xFF08,
279 i40e_aqc_opc_debug_modify_internals = 0xFF09, 278 i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -341,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
341 340
342I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); 341I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
343 342
343/* Set PF context (0x0004, direct) */
344struct i40e_aqc_set_pf_context {
345 u8 pf_id;
346 u8 reserved[15];
347};
348
349I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
350
344/* Request resource ownership (direct 0x0008) 351/* Request resource ownership (direct 0x0008)
345 * Release resource ownership (direct 0x0009) 352 * Release resource ownership (direct 0x0009)
346 */ 353 */
@@ -1289,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
1289 1296
1290I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); 1297I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
1291 1298
1292/* Set Storm Control Configuration (direct 0x0280)
1293 * Get Storm Control Configuration (direct 0x0281)
1294 * the command and response use the same descriptor structure
1295 */
1296struct i40e_aqc_set_get_storm_control_config {
1297 __le32 broadcast_threshold;
1298 __le32 multicast_threshold;
1299 __le32 control_flags;
1300#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
1301#define I40E_AQC_STORM_CONTROL_MDICW 0x02
1302#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
1303#define I40E_AQC_STORM_CONTROL_BDICW 0x08
1304#define I40E_AQC_STORM_CONTROL_BIDU 0x10
1305#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
1306#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
1307 I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
1308 u8 reserved[4];
1309};
1310
1311I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
1312
1313/* DCB 0x03xx*/ 1299/* DCB 0x03xx*/
1314 1300
1315/* PFC Ignore (direct 0x0301) 1301/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
1427struct i40e_aqc_configure_switching_comp_ets_data { 1413struct i40e_aqc_configure_switching_comp_ets_data {
1428 u8 reserved[4]; 1414 u8 reserved[4];
1429 u8 tc_valid_bits; 1415 u8 tc_valid_bits;
1430 u8 reserved1; 1416 u8 seepage;
1417#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
1431 u8 tc_strict_priority_flags; 1418 u8 tc_strict_priority_flags;
1432 u8 reserved2[17]; 1419 u8 reserved1[17];
1433 u8 tc_bw_share_credits[8]; 1420 u8 tc_bw_share_credits[8];
1434 u8 reserved3[96]; 1421 u8 reserved2[96];
1435}; 1422};
1436 1423
1437/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1424/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1499 * (direct 0x041B and 0x041C) uses the generic SEID struct 1486 * (direct 0x041B and 0x041C) uses the generic SEID struct
1500 */ 1487 */
1501 1488
1489/* Configure partition BW
1490 * (indirect 0x041D)
1491 */
1492struct i40e_aqc_configure_partition_bw_data {
1493 __le16 pf_valid_bits;
1494 u8 min_bw[16]; /* guaranteed bandwidth */
1495 u8 max_bw[16]; /* bandwidth limit */
1496};
1497
1502/* Get and set the active HMC resource profile and status. 1498/* Get and set the active HMC resource profile and status.
1503 * (direct 0x0500) and (direct 0x0501) 1499 * (direct 0x0500) and (direct 0x0501)
1504 */ 1500 */
@@ -1539,6 +1535,8 @@ enum i40e_aq_phy_type {
1539 I40E_PHY_TYPE_XLPPI = 0x9, 1535 I40E_PHY_TYPE_XLPPI = 0x9,
1540 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, 1536 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
1541 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, 1537 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
1538 I40E_PHY_TYPE_10GBASE_AOC = 0xC,
1539 I40E_PHY_TYPE_40GBASE_AOC = 0xD,
1542 I40E_PHY_TYPE_100BASE_TX = 0x11, 1540 I40E_PHY_TYPE_100BASE_TX = 0x11,
1543 I40E_PHY_TYPE_1000BASE_T = 0x12, 1541 I40E_PHY_TYPE_1000BASE_T = 0x12,
1544 I40E_PHY_TYPE_10GBASE_T = 0x13, 1542 I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1547,10 @@ enum i40e_aq_phy_type {
1549 I40E_PHY_TYPE_40GBASE_CR4 = 0x18, 1547 I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
1550 I40E_PHY_TYPE_40GBASE_SR4 = 0x19, 1548 I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
1551 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, 1549 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
1552 I40E_PHY_TYPE_20GBASE_KR2 = 0x1B, 1550 I40E_PHY_TYPE_1000BASE_SX = 0x1B,
1551 I40E_PHY_TYPE_1000BASE_LX = 0x1C,
1552 I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
1553 I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
1553 I40E_PHY_TYPE_MAX 1554 I40E_PHY_TYPE_MAX
1554}; 1555};
1555 1556
@@ -1583,11 +1584,8 @@ struct i40e_aq_get_phy_abilities_resp {
1583#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 1584#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
1584#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 1585#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
1585#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 1586#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
1586#define I40E_AQ_PHY_FLAG_AN_SHIFT 3 1587#define I40E_AQ_PHY_LINK_ENABLED 0x08
1587#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT) 1588#define I40E_AQ_PHY_AN_ENABLED 0x10
1588#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
1589#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
1590#define I40E_AQ_PHY_FLAG_AN_ON 0x02
1591#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 1589#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
1592 __le16 eee_capability; 1590 __le16 eee_capability;
1593#define I40E_AQ_EEE_100BASE_TX 0x0002 1591#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1694,7 @@ struct i40e_aqc_get_link_status {
1696#define I40E_AQ_LINK_TX_ACTIVE 0x00 1694#define I40E_AQ_LINK_TX_ACTIVE 0x00
1697#define I40E_AQ_LINK_TX_DRAINED 0x01 1695#define I40E_AQ_LINK_TX_DRAINED 0x01
1698#define I40E_AQ_LINK_TX_FLUSHED 0x03 1696#define I40E_AQ_LINK_TX_FLUSHED 0x03
1697#define I40E_AQ_LINK_FORCED_40G 0x10
1699 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ 1698 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
1700 __le16 max_frame_size; 1699 __le16 max_frame_size;
1701 u8 config; 1700 u8 config;
@@ -1747,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
1747 1746
1748I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); 1747I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
1749 1748
1750/* Set PHY Reset command (0x0622) */ 1749/* Set PHY Debug command (0x0622) */
1751struct i40e_aqc_set_phy_reset { 1750struct i40e_aqc_set_phy_debug {
1752 u8 reset_flags; 1751 u8 command_flags;
1753#define I40E_AQ_PHY_RESET_REQUEST 0x02 1752#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
1753#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
1754#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
1755 I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
1756#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
1757#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
1758#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
1759#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
1754 u8 reserved[15]; 1760 u8 reserved[15];
1755}; 1761};
1756 1762
1757I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset); 1763I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
1758 1764
1759enum i40e_aq_phy_reg_type { 1765enum i40e_aq_phy_reg_type {
1760 I40E_AQC_PHY_REG_INTERNAL = 0x1, 1766 I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1785,47 @@ struct i40e_aqc_nvm_update {
1779 1785
1780I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); 1786I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1781 1787
1788/* NVM Config Read (indirect 0x0704) */
1789struct i40e_aqc_nvm_config_read {
1790 __le16 cmd_flags;
1791#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1792#define ANVM_READ_SINGLE_FEATURE 0
1793#define ANVM_READ_MULTIPLE_FEATURES 1
1794 __le16 element_count;
1795 __le16 element_id; /* Feature/field ID */
1796 u8 reserved[2];
1797 __le32 address_high;
1798 __le32 address_low;
1799};
1800
1801I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
1802
1803/* NVM Config Write (indirect 0x0705) */
1804struct i40e_aqc_nvm_config_write {
1805 __le16 cmd_flags;
1806 __le16 element_count;
1807 u8 reserved[4];
1808 __le32 address_high;
1809 __le32 address_low;
1810};
1811
1812I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1813
1814struct i40e_aqc_nvm_config_data_feature {
1815 __le16 feature_id;
1816 __le16 instance_id;
1817 __le16 feature_options;
1818 __le16 feature_selection;
1819};
1820
1821struct i40e_aqc_nvm_config_data_immediate_field {
1822#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
1823 __le16 field_id;
1824 __le16 instance_id;
1825 __le16 field_options;
1826 __le16 field_value;
1827};
1828
1782/* Send to PF command (indirect 0x0801) id is only used by PF 1829/* Send to PF command (indirect 0x0801) id is only used by PF
1783 * Send to VF command (indirect 0x0802) id is only used by PF 1830 * Send to VF command (indirect 0x0802) id is only used by PF
1784 * Send to Peer PF command (indirect 0x0803) 1831 * Send to Peer PF command (indirect 0x0803)
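Each command struct added in this header (i40e_aqc_set_pf_context, the NVM config read/write pair) is followed by I40E_CHECK_CMD_LENGTH(), a compile-time check defined elsewhere in the file that a direct command exactly fills the 16-byte parameter area of the AQ descriptor. A standalone equivalent using C11 _Static_assert (the kernel macro achieves the same effect without relying on C11):

#include <stdint.h>

/* Local stand-in for I40E_CHECK_CMD_LENGTH(): fail the build if a
 * direct-command struct is not exactly 16 bytes. */
#define CHECK_CMD_LENGTH(X) \
	_Static_assert(sizeof(struct X) == 16, #X " must be 16 bytes")

struct set_pf_context {		/* mirrors i40e_aqc_set_pf_context above */
	uint8_t pf_id;
	uint8_t reserved[15];
};
CHECK_CMD_LENGTH(set_pf_context);

int main(void) { return 0; }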
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc45c54..6e65f19dd6e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -43,12 +43,10 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
43 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { 43 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
44 switch (hw->device_id) { 44 switch (hw->device_id) {
45 case I40E_DEV_ID_SFP_XL710: 45 case I40E_DEV_ID_SFP_XL710:
46 case I40E_DEV_ID_SFP_X710:
47 case I40E_DEV_ID_QEMU: 46 case I40E_DEV_ID_QEMU:
48 case I40E_DEV_ID_KX_A: 47 case I40E_DEV_ID_KX_A:
49 case I40E_DEV_ID_KX_B: 48 case I40E_DEV_ID_KX_B:
50 case I40E_DEV_ID_KX_C: 49 case I40E_DEV_ID_KX_C:
51 case I40E_DEV_ID_KX_D:
52 case I40E_DEV_ID_QSFP_A: 50 case I40E_DEV_ID_QSFP_A:
53 case I40E_DEV_ID_QSFP_B: 51 case I40E_DEV_ID_QSFP_B:
54 case I40E_DEV_ID_QSFP_C: 52 case I40E_DEV_ID_QSFP_C:
@@ -133,7 +131,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
133 **/ 131 **/
134bool i40e_check_asq_alive(struct i40e_hw *hw) 132bool i40e_check_asq_alive(struct i40e_hw *hw)
135{ 133{
136 return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); 134 if (hw->aq.asq.len)
135 return !!(rd32(hw, hw->aq.asq.len) &
136 I40E_PF_ATQLEN_ATQENABLE_MASK);
137 else
138 return false;
137} 139}
138 140
139/** 141/**
@@ -653,6 +655,36 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
653} 655}
654 656
655/** 657/**
658 * i40e_pre_tx_queue_cfg - pre tx queue configure
659 * @hw: pointer to the HW structure
660 * @queue: target pf queue index
661 * @enable: state change request
662 *
663 * Handles hw requirement to indicate intention to enable
664 * or disable target queue.
665 **/
666void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
667{
668 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
669 u32 reg_block = 0;
670 u32 reg_val;
671
672 if (abs_queue_idx >= 128)
673 reg_block = abs_queue_idx / 128;
674
675 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
676 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
677 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
678
679 if (enable)
680 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
681 else
682 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
683
684 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
685}
686
687/**
656 * i40e_get_media_type - Gets media type 688 * i40e_get_media_type - Gets media type
657 * @hw: pointer to the hardware structure 689 * @hw: pointer to the hardware structure
658 **/ 690 **/
@@ -699,7 +731,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
699} 731}
700 732
701#define I40E_PF_RESET_WAIT_COUNT_A0 200 733#define I40E_PF_RESET_WAIT_COUNT_A0 200
702#define I40E_PF_RESET_WAIT_COUNT 10 734#define I40E_PF_RESET_WAIT_COUNT 100
703/** 735/**
704 * i40e_pf_reset - Reset the PF 736 * i40e_pf_reset - Reset the PF
705 * @hw: pointer to the hardware structure 737 * @hw: pointer to the hardware structure
@@ -789,6 +821,9 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
789{ 821{
790 u32 reg; 822 u32 reg;
791 823
824 if (i40e_check_asq_alive(hw))
825 i40e_aq_clear_pxe_mode(hw, NULL);
826
792 /* Clear single descriptor fetch/write-back mode */ 827 /* Clear single descriptor fetch/write-back mode */
793 reg = rd32(hw, I40E_GLLAN_RCTL_0); 828 reg = rd32(hw, I40E_GLLAN_RCTL_0);
794 829
@@ -907,6 +942,33 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
907/* Admin command wrappers */ 942/* Admin command wrappers */
908 943
909/** 944/**
945 * i40e_aq_clear_pxe_mode
946 * @hw: pointer to the hw struct
947 * @cmd_details: pointer to command details structure or NULL
948 *
949 * Tell the firmware that the driver is taking over from PXE
950 **/
951i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
952 struct i40e_asq_cmd_details *cmd_details)
953{
954 i40e_status status;
955 struct i40e_aq_desc desc;
956 struct i40e_aqc_clear_pxe *cmd =
957 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
958
959 i40e_fill_default_direct_cmd_desc(&desc,
960 i40e_aqc_opc_clear_pxe_mode);
961
962 cmd->rx_cnt = 0x2;
963
964 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
965
966 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
967
968 return status;
969}
970
971/**
910 * i40e_aq_set_link_restart_an 972 * i40e_aq_set_link_restart_an
911 * @hw: pointer to the hw struct 973 * @hw: pointer to the hw struct
912 * @cmd_details: pointer to command details structure or NULL 974 * @cmd_details: pointer to command details structure or NULL
@@ -975,6 +1037,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
975 hw_link_info->an_info = resp->an_info; 1037 hw_link_info->an_info = resp->an_info;
976 hw_link_info->ext_info = resp->ext_info; 1038 hw_link_info->ext_info = resp->ext_info;
977 hw_link_info->loopback = resp->loopback; 1039 hw_link_info->loopback = resp->loopback;
1040 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1041 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1042
1043 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1044 hw_link_info->crc_enable = true;
1045 else
1046 hw_link_info->crc_enable = false;
978 1047
979 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE)) 1048 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
980 hw_link_info->lse_enable = true; 1049 hw_link_info->lse_enable = true;
@@ -1021,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1021 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1090 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1022 1091
1023 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1092 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1024 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
1025 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1026 1093
1027 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1094 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1028 sizeof(vsi_ctx->info), cmd_details); 1095 sizeof(vsi_ctx->info), cmd_details);
@@ -1163,8 +1230,6 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
1163 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 1230 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
1164 1231
1165 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 1232 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1166 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
1167 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1168 1233
1169 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1234 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1170 sizeof(vsi_ctx->info), NULL); 1235 sizeof(vsi_ctx->info), NULL);
@@ -1203,8 +1268,6 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
1203 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 1268 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
1204 1269
1205 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1270 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1206 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
1207 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1208 1271
1209 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1272 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1210 sizeof(vsi_ctx->info), cmd_details); 1273 sizeof(vsi_ctx->info), cmd_details);
@@ -1300,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
1300 struct i40e_aqc_driver_version *cmd = 1363 struct i40e_aqc_driver_version *cmd =
1301 (struct i40e_aqc_driver_version *)&desc.params.raw; 1364 (struct i40e_aqc_driver_version *)&desc.params.raw;
1302 i40e_status status; 1365 i40e_status status;
1366 u16 len;
1303 1367
1304 if (dv == NULL) 1368 if (dv == NULL)
1305 return I40E_ERR_PARAM; 1369 return I40E_ERR_PARAM;
@@ -1311,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
1311 cmd->driver_minor_ver = dv->minor_version; 1375 cmd->driver_minor_ver = dv->minor_version;
1312 cmd->driver_build_ver = dv->build_version; 1376 cmd->driver_build_ver = dv->build_version;
1313 cmd->driver_subbuild_ver = dv->subbuild_version; 1377 cmd->driver_subbuild_ver = dv->subbuild_version;
1314 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1378
1379 len = 0;
1380 while (len < sizeof(dv->driver_string) &&
1381 (dv->driver_string[len] < 0x80) &&
1382 dv->driver_string[len])
1383 len++;
1384 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
1385 len, cmd_details);
1315 1386
1316 return status; 1387 return status;
1317} 1388}
@@ -1900,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
1900 } 1971 }
1901 } 1972 }
1902 1973
1974 /* Software override ensuring FCoE is disabled if npar or mfp
1975 * mode because it is not supported in these modes.
1976 */
1977 if (p->npar_enable || p->mfp_mode_1)
1978 p->fcoe = false;
1979
1903 /* additional HW specific goodies that might 1980 /* additional HW specific goodies that might
1904 * someday be HW version specific 1981 * someday be HW version specific
1905 */ 1982 */
@@ -2094,8 +2171,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
2094 * @cmd_details: pointer to command details structure or NULL 2171 * @cmd_details: pointer to command details structure or NULL
2095 **/ 2172 **/
2096i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 2173i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
2097 u16 udp_port, u8 header_len, 2174 u16 udp_port, u8 protocol_index,
2098 u8 protocol_index, u8 *filter_index, 2175 u8 *filter_index,
2099 struct i40e_asq_cmd_details *cmd_details) 2176 struct i40e_asq_cmd_details *cmd_details)
2100{ 2177{
2101 struct i40e_aq_desc desc; 2178 struct i40e_aq_desc desc;
@@ -2253,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
2253} 2330}
2254 2331
2255/** 2332/**
2333 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
2334 * @hw: pointer to the hw struct
2335 * @seid: VSI seid
2336 * @credit: BW limit credits (0 = disabled)
2337 * @max_credit: Max BW limit credits
2338 * @cmd_details: pointer to command details structure or NULL
2339 **/
2340i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
2341 u16 seid, u16 credit, u8 max_credit,
2342 struct i40e_asq_cmd_details *cmd_details)
2343{
2344 struct i40e_aq_desc desc;
2345 struct i40e_aqc_configure_vsi_bw_limit *cmd =
2346 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
2347 i40e_status status;
2348
2349 i40e_fill_default_direct_cmd_desc(&desc,
2350 i40e_aqc_opc_configure_vsi_bw_limit);
2351
2352 cmd->vsi_seid = cpu_to_le16(seid);
2353 cmd->credit = cpu_to_le16(credit);
2354 cmd->max_credit = max_credit;
2355
2356 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2357
2358 return status;
2359}
2360
2361/**
2256 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC 2362 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
2257 * @hw: pointer to the hw struct 2363 * @hw: pointer to the hw struct
2258 * @seid: VSI seid 2364 * @seid: VSI seid
@@ -2405,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
2405{ 2511{
2406 u32 fcoe_cntx_size, fcoe_filt_size; 2512 u32 fcoe_cntx_size, fcoe_filt_size;
2407 u32 pe_cntx_size, pe_filt_size; 2513 u32 pe_cntx_size, pe_filt_size;
2408 u32 fcoe_fmax, pe_fmax; 2514 u32 fcoe_fmax;
2409 u32 val; 2515 u32 val;
2410 2516
2411 /* Validate FCoE settings passed */ 2517 /* Validate FCoE settings passed */
@@ -2480,13 +2586,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
2480 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 2586 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
2481 return I40E_ERR_INVALID_SIZE; 2587 return I40E_ERR_INVALID_SIZE;
2482 2588
2483 /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
2484 val = rd32(hw, I40E_GLHMC_PEXFMAX);
2485 pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
2486 >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
2487 if (pe_filt_size + pe_cntx_size > pe_fmax)
2488 return I40E_ERR_INVALID_SIZE;
2489
2490 return 0; 2589 return 0;
2491} 2590}
2492 2591
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 6e8103abfd0d..00bc0cdb3a03 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
232 struct i40e_ieee_app_priority_table *app) 232 struct i40e_ieee_app_priority_table *app)
233{ 233{
234 int v, err; 234 int v, err;
235 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 235 for (v = 0; v < pf->num_alloc_vsi; v++) {
236 if (pf->vsi[v] && pf->vsi[v]->netdev) { 236 if (pf->vsi[v] && pf->vsi[v]->netdev) {
237 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); 237 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
238 if (err) 238 if (err)
@@ -302,8 +302,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
302 struct net_device *dev = vsi->netdev; 302 struct net_device *dev = vsi->netdev;
303 struct i40e_pf *pf = i40e_netdev_to_pf(dev); 303 struct i40e_pf *pf = i40e_netdev_to_pf(dev);
304 304
305 /* DCB not enabled */ 305 /* Not DCB capable */
306 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 306 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
307 return; 307 return;
308 308
309 /* Do not setup DCB NL ops for MFP mode */ 309 /* Do not setup DCB NL ops for MFP mode */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386fd138..cffdfc21290f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
45 if (seid < 0) 45 if (seid < 0)
46 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); 46 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
47 else 47 else
48 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 48 for (i = 0; i < pf->num_alloc_vsi; i++)
49 if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) 49 if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
50 return pf->vsi[i]; 50 return pf->vsi[i];
51 51
@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
843{ 843{
844 int i; 844 int i;
845 845
846 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 846 for (i = 0; i < pf->num_alloc_vsi; i++)
847 if (pf->vsi[i]) 847 if (pf->vsi[i])
848 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", 848 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
849 i, pf->vsi[i]->seid); 849 i, pf->vsi[i]->seid);
@@ -862,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
862 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n", 862 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
863 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast); 863 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
864 dev_info(&pf->pdev->dev, 864 dev_info(&pf->pdev->dev,
865 " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n", 865 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
866 estats->rx_broadcast, estats->rx_discards, estats->rx_errors); 866 estats->rx_broadcast, estats->rx_discards);
867 dev_info(&pf->pdev->dev, 867 dev_info(&pf->pdev->dev,
868 " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", 868 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
869 estats->rx_missed, estats->rx_unknown_protocol, 869 estats->rx_unknown_protocol, estats->tx_bytes);
870 estats->tx_bytes);
871 dev_info(&pf->pdev->dev, 870 dev_info(&pf->pdev->dev,
872 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n", 871 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
873 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); 872 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
@@ -1527,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1527 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); 1526 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1528 if (cnt == 0) { 1527 if (cnt == 0) {
1529 int i; 1528 int i;
1530 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 1529 for (i = 0; i < pf->num_alloc_vsi; i++)
1531 i40e_vsi_reset_stats(pf->vsi[i]); 1530 i40e_vsi_reset_stats(pf->vsi[i]);
1532 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); 1531 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1533 } else if (cnt == 1) { 1532 } else if (cnt == 1) {
@@ -1744,10 +1743,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1744 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); 1743 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
1745 } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { 1744 } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
1746 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); 1745 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
1747 } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
1748 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
1749 } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
1750 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
1751 } else if (strncmp(cmd_buf, "lldp", 4) == 0) { 1746 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1752 if (strncmp(&cmd_buf[5], "stop", 4) == 0) { 1747 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1753 int ret; 1748 int ret;
@@ -1967,8 +1962,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1967 dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); 1962 dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
1968 dev_info(&pf->pdev->dev, " fd-atr off\n"); 1963 dev_info(&pf->pdev->dev, " fd-atr off\n");
1969 dev_info(&pf->pdev->dev, " fd-atr on\n"); 1964 dev_info(&pf->pdev->dev, " fd-atr on\n");
1970 dev_info(&pf->pdev->dev, " fd-sb off\n");
1971 dev_info(&pf->pdev->dev, " fd-sb on\n");
1972 dev_info(&pf->pdev->dev, " lldp start\n"); 1965 dev_info(&pf->pdev->dev, " lldp start\n");
1973 dev_info(&pf->pdev->dev, " lldp stop\n"); 1966 dev_info(&pf->pdev->dev, " lldp stop\n");
1974 dev_info(&pf->pdev->dev, " lldp get local\n"); 1967 dev_info(&pf->pdev->dev, " lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index b2380daef8c1..56438bd579e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -67,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
67 67
68struct i40e_diag_reg_test_info i40e_reg_list[] = { 68struct i40e_diag_reg_test_info i40e_reg_list[] = {
69 /* offset mask elements stride */ 69 /* offset mask elements stride */
70 {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, 70 {I40E_QTX_CTL(0), 0x0000FFBF, 1,
71 {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, 71 I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
72 {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, 72 {I40E_PFINT_ITR0(0), 0x00000FFF, 3,
73 {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, 73 I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
74 {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, 74 {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
75 {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, 75 I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
76 {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, 76 {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
77 {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, 77 I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
78 {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, 78 {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
79 {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, 79 I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
80 {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, 80 {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
81 {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
82 {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
83 I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
84 {I40E_QINT_TQCTL(0), 0x000000FF, 1,
85 I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
86 {I40E_QINT_RQCTL(0), 0x000000FF, 1,
87 I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
88 {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
81 { 0 } 89 { 0 }
82}; 90};
83 91
@@ -93,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
93 u32 reg, mask; 101 u32 reg, mask;
94 u32 i, j; 102 u32 i, j;
95 103
96 for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) { 104 for (i = 0; i40e_reg_list[i].offset != 0 &&
105 !ret_code; i++) {
106
107 /* set actual reg range for dynamically allocated resources */
108 if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
109 hw->func_caps.num_tx_qp != 0)
110 i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
111 if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
112 i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
113 i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
114 i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
115 i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
116 hw->func_caps.num_msix_vectors != 0)
117 i40e_reg_list[i].elements =
118 hw->func_caps.num_msix_vectors - 1;
119
120 /* test register access */
97 mask = i40e_reg_list[i].mask; 121 mask = i40e_reg_list[i].mask;
98 for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) { 122 for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
99 reg = i40e_reg_list[i].offset + 123 reg = i40e_reg_list[i].offset +
100 (j * i40e_reg_list[i].stride); 124 (j * i40e_reg_list[i].stride);
101 ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); 125 ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
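The diagnostic table above now carries a single element per entry and patches the real counts in at run time from the function capabilities (num_tx_qp for the QTX_CTL row, num_msix_vectors - 1 for the per-vector rows), after which the inner loop still visits reg = offset + j * stride. A standalone sketch of that walk (the offset, mask, and stride values are placeholders):

#include <stdio.h>

struct reg_test_info {
	unsigned int offset;
	unsigned int mask;
	unsigned int elements;	/* patched from capabilities before the loop */
	unsigned int stride;
};

int main(void)
{
	struct reg_test_info t = { 0x38000, 0xFFBF, 4, 0x4 };
	unsigned int j;

	/* Visit every instance of the register array, as the loop in
	 * i40e_diag_reg_test() does before pattern-testing each one. */
	for (j = 0; j < t.elements; j++)
		printf("test reg 0x%08x mask 0x%08x\n",
		       t.offset + j * t.stride, t.mask);
	return 0;
}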
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cbc5c25..4a488ffcd6b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -46,6 +46,8 @@ struct i40e_stats {
46 I40E_STAT(struct i40e_pf, _name, _stat) 46 I40E_STAT(struct i40e_pf, _name, _stat)
47#define I40E_VSI_STAT(_name, _stat) \ 47#define I40E_VSI_STAT(_name, _stat) \
48 I40E_STAT(struct i40e_vsi, _name, _stat) 48 I40E_STAT(struct i40e_vsi, _name, _stat)
49#define I40E_VEB_STAT(_name, _stat) \
50 I40E_STAT(struct i40e_veb, _name, _stat)
49 51
50static const struct i40e_stats i40e_gstrings_net_stats[] = { 52static const struct i40e_stats i40e_gstrings_net_stats[] = {
51 I40E_NETDEV_STAT(rx_packets), 53 I40E_NETDEV_STAT(rx_packets),
@@ -56,12 +58,36 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
56 I40E_NETDEV_STAT(tx_errors), 58 I40E_NETDEV_STAT(tx_errors),
57 I40E_NETDEV_STAT(rx_dropped), 59 I40E_NETDEV_STAT(rx_dropped),
58 I40E_NETDEV_STAT(tx_dropped), 60 I40E_NETDEV_STAT(tx_dropped),
59 I40E_NETDEV_STAT(multicast),
60 I40E_NETDEV_STAT(collisions), 61 I40E_NETDEV_STAT(collisions),
61 I40E_NETDEV_STAT(rx_length_errors), 62 I40E_NETDEV_STAT(rx_length_errors),
62 I40E_NETDEV_STAT(rx_crc_errors), 63 I40E_NETDEV_STAT(rx_crc_errors),
63}; 64};
64 65
66static const struct i40e_stats i40e_gstrings_veb_stats[] = {
67 I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
68 I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
69 I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
70 I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
71 I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
72 I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
73 I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
74 I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
75 I40E_VEB_STAT("rx_discards", stats.rx_discards),
76 I40E_VEB_STAT("tx_discards", stats.tx_discards),
77 I40E_VEB_STAT("tx_errors", stats.tx_errors),
78 I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
79};
80
81static const struct i40e_stats i40e_gstrings_misc_stats[] = {
82 I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
83 I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
84 I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
85 I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
86 I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
87 I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
88 I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
89};
90
65static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, 91static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
66 struct ethtool_rxnfc *cmd); 92 struct ethtool_rxnfc *cmd);
67 93
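
For reference, the stat tables above all reduce to one idea: a table of (string name, byte offset, field size) triples built with offsetof()/sizeof(), which a generic loop can walk over any backing struct. A minimal standalone model of that pattern (all EX_* names are illustrative, not driver code):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ex_stats { uint64_t rx_bytes; uint32_t tx_timeout; };

    struct ex_stat_desc { const char *name; size_t size, off; };

    #define EX_STAT(type, n, member) \
            { n, sizeof(((type *)0)->member), offsetof(type, member) }

    static const struct ex_stat_desc ex_gstrings[] = {
            EX_STAT(struct ex_stats, "rx_bytes", rx_bytes),
            EX_STAT(struct ex_stats, "tx_timeout", tx_timeout),
    };

    int main(void)
    {
            struct ex_stats s = { 12345, 2 };
            size_t i;

            for (i = 0; i < sizeof(ex_gstrings) / sizeof(ex_gstrings[0]); i++) {
                    const struct ex_stat_desc *d = &ex_gstrings[i];
                    const char *p = (const char *)&s + d->off;
                    /* same u64-vs-u32 dispatch as the driver's dump loop */
                    uint64_t v = (d->size == sizeof(uint64_t)) ?
                                 *(const uint64_t *)p : *(const uint32_t *)p;

                    printf("%s = %llu\n", d->name, (unsigned long long)v);
            }
            return 0;
    }
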
@@ -78,7 +104,12 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
78static struct i40e_stats i40e_gstrings_stats[] = { 104static struct i40e_stats i40e_gstrings_stats[] = {
79 I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes), 105 I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
80 I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes), 106 I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
81 I40E_PF_STAT("rx_errors", stats.eth.rx_errors), 107 I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
108 I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
109 I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
110 I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
111 I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
112 I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
82 I40E_PF_STAT("tx_errors", stats.eth.tx_errors), 113 I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
83 I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), 114 I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
84 I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), 115 I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
@@ -88,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
88 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), 119 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
89 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 120 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
90 I40E_PF_STAT("tx_timeout", tx_timeout_count), 121 I40E_PF_STAT("tx_timeout", tx_timeout_count),
122 I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
91 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), 123 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
92 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), 124 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
93 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 125 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -112,8 +144,10 @@ static struct i40e_stats i40e_gstrings_stats[] = {
112 I40E_PF_STAT("rx_oversize", stats.rx_oversize), 144 I40E_PF_STAT("rx_oversize", stats.rx_oversize),
113 I40E_PF_STAT("rx_jabber", stats.rx_jabber), 145 I40E_PF_STAT("rx_jabber", stats.rx_jabber),
114 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), 146 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
115 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
116 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), 147 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
148 I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
149 I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
150
117 /* LPI stats */ 151 /* LPI stats */
118 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), 152 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
119 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), 153 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
@@ -122,11 +156,14 @@ static struct i40e_stats i40e_gstrings_stats[] = {
122}; 156};
123 157
124#define I40E_QUEUE_STATS_LEN(n) \ 158#define I40E_QUEUE_STATS_LEN(n) \
125 ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
126 ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
159 (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
160 * 2 /* Tx and Rx together */ \
161 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
127#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) 162#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
128#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) 163#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
164#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
129#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ 165#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
166 I40E_MISC_STATS_LEN + \
130 I40E_QUEUE_STATS_LEN((n))) 167 I40E_QUEUE_STATS_LEN((n)))
131#define I40E_PFC_STATS_LEN ( \ 168#define I40E_PFC_STATS_LEN ( \
132 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ 169 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
@@ -135,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
135 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ 172 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
136 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ 173 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
137 / sizeof(u64)) 174 / sizeof(u64))
175#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
138#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ 176#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
139 I40E_PFC_STATS_LEN + \ 177 I40E_PFC_STATS_LEN + \
140 I40E_VSI_STATS_LEN((n))) 178 I40E_VSI_STATS_LEN((n)))
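
Worth spelling out: the reworked I40E_QUEUE_STATS_LEN() counts u64 words instead of hard-coding a factor, so the per-VSI ethtool stats length scales with both the queue count and the size of struct i40e_queue_stats (packets + bytes in this series). The arithmetic as a standalone sketch, with an assumed 8-queue VSI:

    #include <stdio.h>

    struct i40e_queue_stats { unsigned long long packets, bytes; };

    int main(void)
    {
            unsigned int num_queue_pairs = 8;       /* assumed example VSI */
            unsigned int per_ring = sizeof(struct i40e_queue_stats) /
                                    sizeof(unsigned long long);     /* 2 */
            unsigned int len = num_queue_pairs * 2 /* Tx and Rx */ * per_ring;

            printf("per-queue u64 entries: %u\n", len);     /* 32 */
            return 0;
    }
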
@@ -620,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
620 case ETH_SS_TEST: 658 case ETH_SS_TEST:
621 return I40E_TEST_LEN; 659 return I40E_TEST_LEN;
622 case ETH_SS_STATS: 660 case ETH_SS_STATS:
623 if (vsi == pf->vsi[pf->lan_vsi])
624 return I40E_PF_STATS_LEN(netdev);
625 else
661 if (vsi == pf->vsi[pf->lan_vsi]) {
662 int len = I40E_PF_STATS_LEN(netdev);
663
664 if (pf->lan_veb != I40E_NO_VEB)
665 len += I40E_VEB_STATS_LEN;
666 return len;
667 } else {
626 return I40E_VSI_STATS_LEN(netdev); 668 return I40E_VSI_STATS_LEN(netdev);
669 }
627 default: 670 default:
628 return -EOPNOTSUPP; 671 return -EOPNOTSUPP;
629 } 672 }
@@ -633,6 +676,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
633 struct ethtool_stats *stats, u64 *data) 676 struct ethtool_stats *stats, u64 *data)
634{ 677{
635 struct i40e_netdev_priv *np = netdev_priv(netdev); 678 struct i40e_netdev_priv *np = netdev_priv(netdev);
679 struct i40e_ring *tx_ring, *rx_ring;
636 struct i40e_vsi *vsi = np->vsi; 680 struct i40e_vsi *vsi = np->vsi;
637 struct i40e_pf *pf = vsi->back; 681 struct i40e_pf *pf = vsi->back;
638 int i = 0; 682 int i = 0;
@@ -648,10 +692,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
648 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == 692 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
649 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 693 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
650 } 694 }
695 for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
696 p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
697 data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
698 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
699 }
651 rcu_read_lock(); 700 rcu_read_lock();
652 for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
653 struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
654 struct i40e_ring *rx_ring;
701 for (j = 0; j < vsi->num_queue_pairs; j++) {
702 tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
655 703
656 if (!tx_ring) 704 if (!tx_ring)
657 continue; 705 continue;
@@ -662,33 +710,45 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
662 data[i] = tx_ring->stats.packets; 710 data[i] = tx_ring->stats.packets;
663 data[i + 1] = tx_ring->stats.bytes; 711 data[i + 1] = tx_ring->stats.bytes;
664 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 712 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
713 i += 2;
665 714
666 /* Rx ring is the 2nd half of the queue pair */ 715 /* Rx ring is the 2nd half of the queue pair */
667 rx_ring = &tx_ring[1]; 716 rx_ring = &tx_ring[1];
668 do { 717 do {
669 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 718 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
670 data[i + 2] = rx_ring->stats.packets;
671 data[i + 3] = rx_ring->stats.bytes;
719 data[i] = rx_ring->stats.packets;
720 data[i + 1] = rx_ring->stats.bytes;
672 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 721 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
722 i += 2;
673 } 723 }
674 rcu_read_unlock(); 724 rcu_read_unlock();
675 if (vsi == pf->vsi[pf->lan_vsi]) {
676 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
677 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
678 data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
679 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
680 }
681 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
682 data[i++] = pf->stats.priority_xon_tx[j];
683 data[i++] = pf->stats.priority_xoff_tx[j];
684 }
685 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
686 data[i++] = pf->stats.priority_xon_rx[j];
687 data[i++] = pf->stats.priority_xoff_rx[j];
688 }
689 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
690 data[i++] = pf->stats.priority_xon_2_xoff[j];
691 }
725 if (vsi != pf->vsi[pf->lan_vsi])
726 return;
727
728 if (pf->lan_veb != I40E_NO_VEB) {
729 struct i40e_veb *veb = pf->veb[pf->lan_veb];
730 for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
731 p = (char *)veb;
732 p += i40e_gstrings_veb_stats[j].stat_offset;
733 data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
734 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
735 }
736 }
737 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
738 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
739 data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
740 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
741 }
742 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
743 data[i++] = pf->stats.priority_xon_tx[j];
744 data[i++] = pf->stats.priority_xoff_tx[j];
745 }
746 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
747 data[i++] = pf->stats.priority_xon_rx[j];
748 data[i++] = pf->stats.priority_xoff_rx[j];
749 }
750 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
751 data[i++] = pf->stats.priority_xon_2_xoff[j];
692} 752}
693 753
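
The per-ring loop above uses the kernel's u64_stats_sync snapshot idiom so 64-bit counters read consistently on 32-bit hosts without locking out the writer. The generic shape of that loop, as a fragment (assumes a ring with a syncp member, per <linux/u64_stats_sync.h>):

    unsigned int start;
    u64 packets, bytes;

    do {
            /* retry if the writer updated the counters mid-read */
            start = u64_stats_fetch_begin_irq(&ring->syncp);
            packets = ring->stats.packets;
            bytes = ring->stats.bytes;
    } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
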
694static void i40e_get_strings(struct net_device *netdev, u32 stringset, 754static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -713,6 +773,11 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
713 i40e_gstrings_net_stats[i].stat_string); 773 i40e_gstrings_net_stats[i].stat_string);
714 p += ETH_GSTRING_LEN; 774 p += ETH_GSTRING_LEN;
715 } 775 }
776 for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
777 snprintf(p, ETH_GSTRING_LEN, "%s",
778 i40e_gstrings_misc_stats[i].stat_string);
779 p += ETH_GSTRING_LEN;
780 }
716 for (i = 0; i < vsi->num_queue_pairs; i++) { 781 for (i = 0; i < vsi->num_queue_pairs; i++) {
717 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); 782 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
718 p += ETH_GSTRING_LEN; 783 p += ETH_GSTRING_LEN;
@@ -723,34 +788,42 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
723 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); 788 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
724 p += ETH_GSTRING_LEN; 789 p += ETH_GSTRING_LEN;
725 } 790 }
726 if (vsi == pf->vsi[pf->lan_vsi]) {
727 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
728 snprintf(p, ETH_GSTRING_LEN, "port.%s",
729 i40e_gstrings_stats[i].stat_string);
730 p += ETH_GSTRING_LEN;
731 }
732 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
733 snprintf(p, ETH_GSTRING_LEN,
734 "port.tx_priority_%u_xon", i);
735 p += ETH_GSTRING_LEN;
736 snprintf(p, ETH_GSTRING_LEN,
737 "port.tx_priority_%u_xoff", i);
738 p += ETH_GSTRING_LEN;
739 }
740 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
741 snprintf(p, ETH_GSTRING_LEN,
742 "port.rx_priority_%u_xon", i);
743 p += ETH_GSTRING_LEN;
744 snprintf(p, ETH_GSTRING_LEN,
745 "port.rx_priority_%u_xoff", i);
746 p += ETH_GSTRING_LEN;
747 }
748 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
749 snprintf(p, ETH_GSTRING_LEN,
750 "port.rx_priority_%u_xon_2_xoff", i);
751 p += ETH_GSTRING_LEN;
752 }
753 }
791 if (vsi != pf->vsi[pf->lan_vsi])
792 return;
793
794 if (pf->lan_veb != I40E_NO_VEB) {
795 for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
796 snprintf(p, ETH_GSTRING_LEN, "veb.%s",
797 i40e_gstrings_veb_stats[i].stat_string);
798 p += ETH_GSTRING_LEN;
799 }
800 }
801 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
802 snprintf(p, ETH_GSTRING_LEN, "port.%s",
803 i40e_gstrings_stats[i].stat_string);
804 p += ETH_GSTRING_LEN;
805 }
806 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
807 snprintf(p, ETH_GSTRING_LEN,
808 "port.tx_priority_%u_xon", i);
809 p += ETH_GSTRING_LEN;
810 snprintf(p, ETH_GSTRING_LEN,
811 "port.tx_priority_%u_xoff", i);
812 p += ETH_GSTRING_LEN;
813 }
814 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
815 snprintf(p, ETH_GSTRING_LEN,
816 "port.rx_priority_%u_xon", i);
817 p += ETH_GSTRING_LEN;
818 snprintf(p, ETH_GSTRING_LEN,
819 "port.rx_priority_%u_xoff", i);
820 p += ETH_GSTRING_LEN;
821 }
822 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
823 snprintf(p, ETH_GSTRING_LEN,
824 "port.rx_priority_%u_xon_2_xoff", i);
825 p += ETH_GSTRING_LEN;
826 }
754 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ 827 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
755 break; 828 break;
756 } 829 }
@@ -1007,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
1007 ec->rx_max_coalesced_frames_irq = vsi->work_limit; 1080 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
1008 1081
1009 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) 1082 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
1010 ec->rx_coalesce_usecs = 1;
1011 else
1012 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
1083 ec->use_adaptive_rx_coalesce = 1;
1013 1084
1014 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) 1085 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1015 ec->tx_coalesce_usecs = 1;
1016 else
1017 ec->tx_coalesce_usecs = vsi->tx_itr_setting;
1086 ec->use_adaptive_tx_coalesce = 1;
1087
1088 ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
1089 ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
1018 1090
1019 return 0; 1091 return 0;
1020} 1092}
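
The coalesce rework separates "adaptive on/off" (use_adaptive_{rx,tx}_coalesce) from the microsecond value, keeping the I40E_ITR_DYNAMIC flag ORed into vsi->*_itr_setting instead of overloading usecs == 1. The << 1 in the bounds checks appears to reflect the ITR registers counting in 2-usec units, so register limits are shifted up into microseconds; a standalone sketch with illustrative (not driver) limit values:

    #include <stdio.h>

    #define EX_MIN_ITR 0x0001       /* illustrative, in 2-usec units */
    #define EX_MAX_ITR 0x0FF8

    static int itr_usecs_valid(unsigned int usecs)
    {
            return usecs >= (EX_MIN_ITR << 1) && usecs <= (EX_MAX_ITR << 1);
    }

    int main(void)
    {
            printf("2 usecs ok: %d\n", itr_usecs_valid(2));          /* 1 */
            printf("huge value ok: %d\n", itr_usecs_valid(1 << 20)); /* 0 */
            return 0;
    }
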
@@ -1033,37 +1105,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
1033 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) 1105 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
1034 vsi->work_limit = ec->tx_max_coalesced_frames_irq; 1106 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
1035 1107
1036 switch (ec->rx_coalesce_usecs) {
1037 case 0:
1038 vsi->rx_itr_setting = 0;
1039 break;
1040 case 1:
1041 vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
1042 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
1043 break;
1044 default:
1045 if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1046 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1047 return -EINVAL;
1108 if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1109 (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
1048 vsi->rx_itr_setting = ec->rx_coalesce_usecs; 1110 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1049 break;
1050 }
1111 else
1112 return -EINVAL;
1051 1113
1052 switch (ec->tx_coalesce_usecs) {
1053 case 0:
1054 vsi->tx_itr_setting = 0;
1055 break;
1056 case 1:
1057 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
1058 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
1059 break;
1060 default:
1061 if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1062 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1063 return -EINVAL;
1114 if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
1115 (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
1064 vsi->tx_itr_setting = ec->tx_coalesce_usecs; 1116 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1065 break;
1066 }
1117 else
1118 return -EINVAL;
1119
1120 if (ec->use_adaptive_rx_coalesce)
1121 vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
1122 else
1123 vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
1124
1125 if (ec->use_adaptive_tx_coalesce)
1126 vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
1127 else
1128 vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
1067 1129
1068 vector = vsi->base_vector; 1130 vector = vsi->base_vector;
1069 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 1131 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
@@ -1140,8 +1202,7 @@ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
1140 int cnt = 0; 1202 int cnt = 0;
1141 1203
1142 /* report total rule count */ 1204 /* report total rule count */
1143 cmd->data = pf->hw.fdir_shared_filter_count +
1144 pf->fdir_pf_filter_count;
1205 cmd->data = i40e_get_fd_cnt_all(pf);
1145 1206
1146 hlist_for_each_entry_safe(rule, node2, 1207 hlist_for_each_entry_safe(rule, node2,
1147 &pf->fdir_filter_list, fdir_node) { 1208 &pf->fdir_filter_list, fdir_node) {
@@ -1175,10 +1236,6 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1175 struct i40e_fdir_filter *rule = NULL; 1236 struct i40e_fdir_filter *rule = NULL;
1176 struct hlist_node *node2; 1237 struct hlist_node *node2;
1177 1238
1178 /* report total rule count */
1179 cmd->data = pf->hw.fdir_shared_filter_count +
1180 pf->fdir_pf_filter_count;
1181
1182 hlist_for_each_entry_safe(rule, node2, 1239 hlist_for_each_entry_safe(rule, node2,
1183 &pf->fdir_filter_list, fdir_node) { 1240 &pf->fdir_filter_list, fdir_node) {
1184 if (fsp->location <= rule->fd_id) 1241 if (fsp->location <= rule->fd_id)
@@ -1189,11 +1246,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1189 return -EINVAL; 1246 return -EINVAL;
1190 1247
1191 fsp->flow_type = rule->flow_type; 1248 fsp->flow_type = rule->flow_type;
1192 fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
1193 fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
1194 fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
1195 fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
1196 fsp->ring_cookie = rule->q_index;
1249 if (fsp->flow_type == IP_USER_FLOW) {
1250 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1251 fsp->h_u.usr_ip4_spec.proto = 0;
1252 fsp->m_u.usr_ip4_spec.proto = 0;
1253 }
1254
1255 /* Reverse the src and dest notion, since the HW views them from
1256 * Tx perspective where as the user expects it from Rx filter view.
1257 */
1258 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
1259 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
1260 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
1261 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
1262
1263 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
1264 fsp->ring_cookie = RX_CLS_FLOW_DISC;
1265 else
1266 fsp->ring_cookie = rule->q_index;
1197 1267
1198 return 0; 1268 return 0;
1199} 1269}
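
Two conventions are at work in the flow-director paths above: the src/dst tuple is mirrored because the hardware programs filters from the Tx perspective, and ring_cookie doubles as an action, with RX_CLS_FLOW_DISC (from <linux/ethtool.h>) meaning "drop" rather than naming a queue. A fragment sketching the ring_cookie handling (dest_ctl and q_index are illustrative locals, not the driver's exact flow):

    if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
            dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
    else if (fsp->ring_cookie >= vsi->num_queue_pairs)
            return -EINVAL;                 /* not a valid queue */
    else
            q_index = fsp->ring_cookie;     /* steer to this queue */
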
@@ -1223,6 +1293,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1223 break; 1293 break;
1224 case ETHTOOL_GRXCLSRLCNT: 1294 case ETHTOOL_GRXCLSRLCNT:
1225 cmd->rule_cnt = pf->fdir_pf_active_filters; 1295 cmd->rule_cnt = pf->fdir_pf_active_filters;
1296 /* report total rule count */
1297 cmd->data = i40e_get_fd_cnt_all(pf);
1226 ret = 0; 1298 ret = 0;
1227 break; 1299 break;
1228 case ETHTOOL_GRXCLSRULE: 1300 case ETHTOOL_GRXCLSRULE:
@@ -1291,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1291 case UDP_V4_FLOW: 1363 case UDP_V4_FLOW:
1292 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 1364 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1293 case 0: 1365 case 0:
1294 hena &=
1295 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1296 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1297 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1366 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
1367 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1298 break; 1368 break;
1299 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 1369 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1300 hena |=
1301 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1302 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1303 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1370 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
1371 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1304 break; 1372 break;
1305 default: 1373 default:
1306 return -EINVAL; 1374 return -EINVAL;
@@ -1309,16 +1377,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1309 case UDP_V6_FLOW: 1377 case UDP_V6_FLOW:
1310 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 1378 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1311 case 0: 1379 case 0:
1312 hena &=
1313 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1314 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1315 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1380 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
1381 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1316 break; 1382 break;
1317 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 1383 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1318 hena |=
1319 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1320 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1321 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1384 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
1385 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1322 break; 1386 break;
1323 default: 1387 default:
1324 return -EINVAL; 1388 return -EINVAL;
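
The hena value manipulated above is a 64-bit enable mask with one bit per hardware packet-classifier type (PCTYPE); the ethtool RXH flags simply set or clear those bits before the mask is written back to the hash-enable registers. A standalone sketch of the bit handling (bit positions illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define EX_PCTYPE_NONF_IPV4_UDP 31      /* illustrative bit index */
    #define EX_PCTYPE_FRAG_IPV4     35

    int main(void)
    {
            uint64_t hena = 0;

            /* RXH_L4_B_0_1 | RXH_L4_B_2_3: hash on the UDP ports */
            hena |= ((uint64_t)1 << EX_PCTYPE_NONF_IPV4_UDP) |
                    ((uint64_t)1 << EX_PCTYPE_FRAG_IPV4);
            /* data == 0: stop hashing these flow types */
            hena &= ~(((uint64_t)1 << EX_PCTYPE_NONF_IPV4_UDP) |
                      ((uint64_t)1 << EX_PCTYPE_FRAG_IPV4));
            printf("hena = 0x%016llx\n", (unsigned long long)hena);
            return 0;
    }
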
@@ -1503,7 +1567,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
1503 return -EINVAL; 1567 return -EINVAL;
1504 } 1568 }
1505 1569
1506 if (fsp->ring_cookie >= vsi->num_queue_pairs)
1570 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
1571 (fsp->ring_cookie >= vsi->num_queue_pairs))
1507 return -EINVAL; 1572 return -EINVAL;
1508 1573
1509 input = kzalloc(sizeof(*input), GFP_KERNEL); 1574 input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -1524,13 +1589,17 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
1524 input->pctype = 0; 1589 input->pctype = 0;
1525 input->dest_vsi = vsi->id; 1590 input->dest_vsi = vsi->id;
1526 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; 1591 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1527 input->cnt_index = 0;
1592 input->cnt_index = pf->fd_sb_cnt_idx;
1528 input->flow_type = fsp->flow_type; 1593 input->flow_type = fsp->flow_type;
1529 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; 1594 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
1530 input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
1531 input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1532 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
1533 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
1595
1596 /* Reverse the src and dest notion, since the HW expects them to be from
1597 * Tx perspective where as the input from user is from Rx filter view.
1598 */
1599 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
1600 input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
1601 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
1602 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
1534 1603
1535 ret = i40e_add_del_fdir(vsi, input, true); 1604 ret = i40e_add_del_fdir(vsi, input, true);
1536 if (ret) 1605 if (ret)
@@ -1692,5 +1761,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
1692 1761
1693void i40e_set_ethtool_ops(struct net_device *netdev) 1762void i40e_set_ethtool_ops(struct net_device *netdev)
1694{ 1763{
1695 SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
1764 netdev->ethtool_ops = &i40e_ethtool_ops;
1696} 1765}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index bf2d4cc5b569..9b987ccc9e82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -201,7 +201,7 @@ exit:
201 **/ 201 **/
202i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, 202i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
203 struct i40e_hmc_info *hmc_info, 203 struct i40e_hmc_info *hmc_info,
204 u32 idx, bool is_pf)
204 u32 idx)
205{ 205{
206 i40e_status ret_code = 0; 206 i40e_status ret_code = 0;
207 struct i40e_hmc_pd_entry *pd_entry; 207 struct i40e_hmc_pd_entry *pd_entry;
@@ -237,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
237 pd_addr = (u64 *)pd_table->pd_page_addr.va; 237 pd_addr = (u64 *)pd_table->pd_page_addr.va;
238 pd_addr += rel_pd_idx; 238 pd_addr += rel_pd_idx;
239 memset(pd_addr, 0, sizeof(u64)); 239 memset(pd_addr, 0, sizeof(u64));
240 if (is_pf)
241 I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
242 else
243 I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
240 I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
244 241
245 /* free memory here */ 242 /* free memory here */
246 ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); 243 ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 0cd4701234f8..b45d8fedc5e7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
163 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ 163 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
164 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) 164 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
165 165
166#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
167 wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
168 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
169 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
170
171/** 166/**
172 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit 167 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
173 * @hmc_info: pointer to the HMC configuration information structure 168 * @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
226 u32 pd_index); 221 u32 pd_index);
227i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, 222i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
228 struct i40e_hmc_info *hmc_info, 223 struct i40e_hmc_info *hmc_info,
229 u32 idx, bool is_pf);
224 u32 idx);
230i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, 225i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
231 u32 idx); 226 u32 idx);
232i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, 227i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe2691d..870ab1ee072c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -397,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
397 /* remove the backing pages from pd_idx1 to i */ 397 /* remove the backing pages from pd_idx1 to i */
398 while (i && (i > pd_idx1)) { 398 while (i && (i > pd_idx1)) {
399 i40e_remove_pd_bp(hw, info->hmc_info, 399 i40e_remove_pd_bp(hw, info->hmc_info,
400 (i - 1), true);
400 (i - 1));
401 i--; 401 i--;
402 } 402 }
403 } 403 }
@@ -433,11 +433,7 @@ exit_sd_error:
433 ((j - 1) * I40E_HMC_MAX_BP_COUNT)); 433 ((j - 1) * I40E_HMC_MAX_BP_COUNT));
434 pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); 434 pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
435 for (i = pd_idx1; i < pd_lmt1; i++) { 435 for (i = pd_idx1; i < pd_lmt1; i++) {
436 i40e_remove_pd_bp(
437 hw,
438 info->hmc_info,
439 i,
440 true);
436 i40e_remove_pd_bp(hw, info->hmc_info, i);
441 } 437 }
442 i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); 438 i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
443 break; 439 break;
@@ -616,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
616 pd_table = 612 pd_table =
617 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; 613 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
618 if (pd_table->pd_entry[rel_pd_idx].valid) { 614 if (pd_table->pd_entry[rel_pd_idx].valid) {
619 ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
620 j, true);
615 ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
621 if (ret_code) 616 if (ret_code)
622 goto exit; 617 goto exit;
623 } 618 }
@@ -747,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
747 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, 742 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
748 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, 743 { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
749 { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, 744 { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
745 { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
750 { 0 } 746 { 0 }
751}; 747};
752 748
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de925a298..eb65fe23c4a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
56 u8 tphdata_ena; 56 u8 tphdata_ena;
57 u8 tphhead_ena; 57 u8 tphhead_ena;
58 u8 lrxqthresh; 58 u8 lrxqthresh;
59 u8 prefena; /* NOTE: normally must be set to 1 at init */
59}; 60};
60 61
61/* Tx queue context data */ 62/* Tx queue context data */
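
The HMC context tables in i40e_lan_hmc.c, including the new prefena entry, describe each field as a {member, width-in-bits, lsb} triple that a packer turns into the bit-exact hardware queue context -- so adding prefena is just the struct member above plus one table row at bit offset 201. A minimal standalone model of that packing (not the driver's implementation):

    #include <stdio.h>
    #include <stdint.h>

    struct ce { unsigned int width, lsb, value; };

    static void pack_field(uint8_t *buf, const struct ce *f)
    {
            unsigned int i;

            for (i = 0; i < f->width; i++)
                    if (f->value & (1u << i)) {
                            unsigned int bit = f->lsb + i;

                            buf[bit / 8] |= (uint8_t)(1u << (bit % 8));
                    }
    }

    int main(void)
    {
            uint8_t ctx[32] = { 0 };
            struct ce prefena = { 1, 201, 1 };      /* width 1, lsb 201, set */

            pack_field(ctx, &prefena);
            printf("byte %u = 0x%02x\n", 201 / 8, ctx[201 / 8]); /* byte 25 = 0x02 */
            return 0;
    }
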
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e72449f1265..275ca9a1719e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
38#define DRV_KERN "-k" 38#define DRV_KERN "-k"
39 39
40#define DRV_VERSION_MAJOR 0 40#define DRV_VERSION_MAJOR 0
41#define DRV_VERSION_MINOR 3
42#define DRV_VERSION_BUILD 36
41#define DRV_VERSION_MINOR 4
42#define DRV_VERSION_BUILD 10
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -67,12 +67,10 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
67 */ 67 */
68static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = { 68static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, 69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, 70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, 71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, 72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, 73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, 74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, 75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, 76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
@@ -356,6 +354,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
356 struct rtnl_link_stats64 *stats) 354 struct rtnl_link_stats64 *stats)
357{ 355{
358 struct i40e_netdev_priv *np = netdev_priv(netdev); 356 struct i40e_netdev_priv *np = netdev_priv(netdev);
357 struct i40e_ring *tx_ring, *rx_ring;
359 struct i40e_vsi *vsi = np->vsi; 358 struct i40e_vsi *vsi = np->vsi;
360 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); 359 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
361 int i; 360 int i;
@@ -368,7 +367,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
368 367
369 rcu_read_lock(); 368 rcu_read_lock();
370 for (i = 0; i < vsi->num_queue_pairs; i++) { 369 for (i = 0; i < vsi->num_queue_pairs; i++) {
371 struct i40e_ring *tx_ring, *rx_ring;
372 u64 bytes, packets; 370 u64 bytes, packets;
373 unsigned int start; 371 unsigned int start;
374 372
@@ -397,7 +395,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
397 } 395 }
398 rcu_read_unlock(); 396 rcu_read_unlock();
399 397
400 /* following stats updated by ixgbe_watchdog_task() */
398 /* following stats updated by i40e_watchdog_subtask() */
401 stats->multicast = vsi_stats->multicast; 399 stats->multicast = vsi_stats->multicast;
402 stats->tx_errors = vsi_stats->tx_errors; 400 stats->tx_errors = vsi_stats->tx_errors;
403 stats->tx_dropped = vsi_stats->tx_dropped; 401 stats->tx_dropped = vsi_stats->tx_dropped;
@@ -530,6 +528,12 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
530 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 528 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
531 vsi->stat_offsets_loaded, 529 vsi->stat_offsets_loaded,
532 &oes->rx_discards, &es->rx_discards); 530 &oes->rx_discards, &es->rx_discards);
531 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
532 vsi->stat_offsets_loaded,
533 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
534 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
535 vsi->stat_offsets_loaded,
536 &oes->tx_errors, &es->tx_errors);
533 537
534 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 538 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
535 I40E_GLV_GORCL(stat_idx), 539 I40E_GLV_GORCL(stat_idx),
@@ -648,10 +652,10 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
648 return; 652 return;
649 653
650 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ 654 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
651 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
655 for (v = 0; v < pf->num_alloc_vsi; v++) {
652 struct i40e_vsi *vsi = pf->vsi[v]; 656 struct i40e_vsi *vsi = pf->vsi[v];
653 657
654 if (!vsi)
658 if (!vsi || !vsi->tx_rings[0])
655 continue; 659 continue;
656 660
657 for (i = 0; i < vsi->num_queue_pairs; i++) { 661 for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -702,10 +706,10 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
702 } 706 }
703 707
704 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ 708 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
705 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
709 for (v = 0; v < pf->num_alloc_vsi; v++) {
706 struct i40e_vsi *vsi = pf->vsi[v]; 710 struct i40e_vsi *vsi = pf->vsi[v];
707 711
708 if (!vsi)
712 if (!vsi || !vsi->tx_rings[0])
709 continue; 713 continue;
710 714
711 for (i = 0; i < vsi->num_queue_pairs; i++) { 715 for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -720,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
720} 724}
721 725
722/** 726/**
723 * i40e_update_stats - Update the board statistics counters.
727 * i40e_update_vsi_stats - Update the vsi statistics counters.
724 * @vsi: the VSI to be updated 728 * @vsi: the VSI to be updated
725 * 729 *
726 * There are a few instances where we store the same stat in a 730 * There are a few instances where we store the same stat in a
727 * couple of different structs. This is partly because we have 731 * couple of different structs. This is partly because we have
728 * the netdev stats that need to be filled out, which is slightly 732 * the netdev stats that need to be filled out, which is slightly
729 * different from the "eth_stats" defined by the chip and used in 733 * different from the "eth_stats" defined by the chip and used in
730 * VF communications. We sort it all out here in a central place.
734 * VF communications. We sort it out here.
731 **/ 735 **/
732void i40e_update_stats(struct i40e_vsi *vsi)
736static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
733{ 737{
734 struct i40e_pf *pf = vsi->back; 738 struct i40e_pf *pf = vsi->back;
735 struct i40e_hw *hw = &pf->hw;
736 struct rtnl_link_stats64 *ons; 739 struct rtnl_link_stats64 *ons;
737 struct rtnl_link_stats64 *ns; /* netdev stats */ 740 struct rtnl_link_stats64 *ns; /* netdev stats */
738 struct i40e_eth_stats *oes; 741 struct i40e_eth_stats *oes;
@@ -741,8 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
741 u32 rx_page, rx_buf; 744 u32 rx_page, rx_buf;
742 u64 rx_p, rx_b; 745 u64 rx_p, rx_b;
743 u64 tx_p, tx_b; 746 u64 tx_p, tx_b;
744 u32 val;
745 int i;
746 u16 q; 747 u16 q;
747 748
748 if (test_bit(__I40E_DOWN, &vsi->state) || 749 if (test_bit(__I40E_DOWN, &vsi->state) ||
@@ -804,196 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)
804 ns->tx_packets = tx_p; 805 ns->tx_packets = tx_p;
805 ns->tx_bytes = tx_b; 806 ns->tx_bytes = tx_b;
806 807
807 i40e_update_eth_stats(vsi);
808 /* update netdev stats from eth stats */ 808 /* update netdev stats from eth stats */
809 ons->rx_errors = oes->rx_errors;
810 ns->rx_errors = es->rx_errors;
809 i40e_update_eth_stats(vsi);
811 ons->tx_errors = oes->tx_errors; 810 ons->tx_errors = oes->tx_errors;
812 ns->tx_errors = es->tx_errors; 811 ns->tx_errors = es->tx_errors;
813 ons->multicast = oes->rx_multicast; 812 ons->multicast = oes->rx_multicast;
814 ns->multicast = es->rx_multicast; 813 ns->multicast = es->rx_multicast;
814 ons->rx_dropped = oes->rx_discards;
815 ns->rx_dropped = es->rx_discards;
815 ons->tx_dropped = oes->tx_discards; 816 ons->tx_dropped = oes->tx_discards;
816 ns->tx_dropped = es->tx_discards; 817 ns->tx_dropped = es->tx_discards;
817 818
818 /* Get the port data only if this is the main PF VSI */
819 /* pull in a couple PF stats if this is the main vsi */
819 if (vsi == pf->vsi[pf->lan_vsi]) { 820 if (vsi == pf->vsi[pf->lan_vsi]) {
820 struct i40e_hw_port_stats *nsd = &pf->stats;
821 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
822
823 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
824 I40E_GLPRT_GORCL(hw->port),
825 pf->stat_offsets_loaded,
826 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
827 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
828 I40E_GLPRT_GOTCL(hw->port),
829 pf->stat_offsets_loaded,
830 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
831 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
832 pf->stat_offsets_loaded,
833 &osd->eth.rx_discards,
834 &nsd->eth.rx_discards);
835 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
836 pf->stat_offsets_loaded,
837 &osd->eth.tx_discards,
838 &nsd->eth.tx_discards);
839 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
840 I40E_GLPRT_MPRCL(hw->port),
841 pf->stat_offsets_loaded,
842 &osd->eth.rx_multicast,
843 &nsd->eth.rx_multicast);
844
845 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
846 pf->stat_offsets_loaded,
847 &osd->tx_dropped_link_down,
848 &nsd->tx_dropped_link_down);
849
850 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
851 pf->stat_offsets_loaded,
852 &osd->crc_errors, &nsd->crc_errors);
853 ns->rx_crc_errors = nsd->crc_errors;
854
855 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
856 pf->stat_offsets_loaded,
857 &osd->illegal_bytes, &nsd->illegal_bytes);
858 ns->rx_errors = nsd->crc_errors
859 + nsd->illegal_bytes;
860
861 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
862 pf->stat_offsets_loaded,
863 &osd->mac_local_faults,
864 &nsd->mac_local_faults);
865 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
866 pf->stat_offsets_loaded,
867 &osd->mac_remote_faults,
868 &nsd->mac_remote_faults);
869
870 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
871 pf->stat_offsets_loaded,
872 &osd->rx_length_errors,
873 &nsd->rx_length_errors);
874 ns->rx_length_errors = nsd->rx_length_errors;
875
876 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
877 pf->stat_offsets_loaded,
878 &osd->link_xon_rx, &nsd->link_xon_rx);
879 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
880 pf->stat_offsets_loaded,
881 &osd->link_xon_tx, &nsd->link_xon_tx);
882 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
883 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
884 pf->stat_offsets_loaded,
885 &osd->link_xoff_tx, &nsd->link_xoff_tx);
886
887 for (i = 0; i < 8; i++) {
888 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
889 pf->stat_offsets_loaded,
890 &osd->priority_xon_rx[i],
891 &nsd->priority_xon_rx[i]);
892 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
893 pf->stat_offsets_loaded,
894 &osd->priority_xon_tx[i],
895 &nsd->priority_xon_tx[i]);
896 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
897 pf->stat_offsets_loaded,
898 &osd->priority_xoff_tx[i],
899 &nsd->priority_xoff_tx[i]);
900 i40e_stat_update32(hw,
901 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
902 pf->stat_offsets_loaded,
903 &osd->priority_xon_2_xoff[i],
904 &nsd->priority_xon_2_xoff[i]);
905 }
906
907 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
908 I40E_GLPRT_PRC64L(hw->port),
909 pf->stat_offsets_loaded,
910 &osd->rx_size_64, &nsd->rx_size_64);
911 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
912 I40E_GLPRT_PRC127L(hw->port),
913 pf->stat_offsets_loaded,
914 &osd->rx_size_127, &nsd->rx_size_127);
915 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
916 I40E_GLPRT_PRC255L(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->rx_size_255, &nsd->rx_size_255);
919 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
920 I40E_GLPRT_PRC511L(hw->port),
921 pf->stat_offsets_loaded,
922 &osd->rx_size_511, &nsd->rx_size_511);
923 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
924 I40E_GLPRT_PRC1023L(hw->port),
925 pf->stat_offsets_loaded,
926 &osd->rx_size_1023, &nsd->rx_size_1023);
927 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
928 I40E_GLPRT_PRC1522L(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->rx_size_1522, &nsd->rx_size_1522);
931 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
932 I40E_GLPRT_PRC9522L(hw->port),
933 pf->stat_offsets_loaded,
934 &osd->rx_size_big, &nsd->rx_size_big);
935
936 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
937 I40E_GLPRT_PTC64L(hw->port),
938 pf->stat_offsets_loaded,
939 &osd->tx_size_64, &nsd->tx_size_64);
940 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
941 I40E_GLPRT_PTC127L(hw->port),
942 pf->stat_offsets_loaded,
943 &osd->tx_size_127, &nsd->tx_size_127);
944 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
945 I40E_GLPRT_PTC255L(hw->port),
946 pf->stat_offsets_loaded,
947 &osd->tx_size_255, &nsd->tx_size_255);
948 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
949 I40E_GLPRT_PTC511L(hw->port),
950 pf->stat_offsets_loaded,
951 &osd->tx_size_511, &nsd->tx_size_511);
952 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
953 I40E_GLPRT_PTC1023L(hw->port),
954 pf->stat_offsets_loaded,
955 &osd->tx_size_1023, &nsd->tx_size_1023);
956 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
957 I40E_GLPRT_PTC1522L(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->tx_size_1522, &nsd->tx_size_1522);
960 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
961 I40E_GLPRT_PTC9522L(hw->port),
962 pf->stat_offsets_loaded,
963 &osd->tx_size_big, &nsd->tx_size_big);
964
965 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
966 pf->stat_offsets_loaded,
967 &osd->rx_undersize, &nsd->rx_undersize);
968 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
969 pf->stat_offsets_loaded,
970 &osd->rx_fragments, &nsd->rx_fragments);
971 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
972 pf->stat_offsets_loaded,
973 &osd->rx_oversize, &nsd->rx_oversize);
974 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
975 pf->stat_offsets_loaded,
976 &osd->rx_jabber, &nsd->rx_jabber);
977
978 val = rd32(hw, I40E_PRTPM_EEE_STAT);
979 nsd->tx_lpi_status =
980 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
981 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
982 nsd->rx_lpi_status =
983 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
984 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
985 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
986 pf->stat_offsets_loaded,
987 &osd->tx_lpi_count, &nsd->tx_lpi_count);
988 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
989 pf->stat_offsets_loaded,
990 &osd->rx_lpi_count, &nsd->rx_lpi_count);
991 }
992
821 ns->rx_crc_errors = pf->stats.crc_errors;
822 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
823 ns->rx_length_errors = pf->stats.rx_length_errors;
824 }
825}
826
827/**
828 * i40e_update_pf_stats - Update the pf statistics counters.
829 * @pf: the PF to be updated
830 **/
831static void i40e_update_pf_stats(struct i40e_pf *pf)
832{
833 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
834 struct i40e_hw_port_stats *nsd = &pf->stats;
835 struct i40e_hw *hw = &pf->hw;
836 u32 val;
837 int i;
838
839 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
840 I40E_GLPRT_GORCL(hw->port),
841 pf->stat_offsets_loaded,
842 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
843 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
844 I40E_GLPRT_GOTCL(hw->port),
845 pf->stat_offsets_loaded,
846 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
847 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
848 pf->stat_offsets_loaded,
849 &osd->eth.rx_discards,
850 &nsd->eth.rx_discards);
851 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
852 pf->stat_offsets_loaded,
853 &osd->eth.tx_discards,
854 &nsd->eth.tx_discards);
855
856 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
857 I40E_GLPRT_UPRCL(hw->port),
858 pf->stat_offsets_loaded,
859 &osd->eth.rx_unicast,
860 &nsd->eth.rx_unicast);
861 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
862 I40E_GLPRT_MPRCL(hw->port),
863 pf->stat_offsets_loaded,
864 &osd->eth.rx_multicast,
865 &nsd->eth.rx_multicast);
866 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
867 I40E_GLPRT_BPRCL(hw->port),
868 pf->stat_offsets_loaded,
869 &osd->eth.rx_broadcast,
870 &nsd->eth.rx_broadcast);
871 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
872 I40E_GLPRT_UPTCL(hw->port),
873 pf->stat_offsets_loaded,
874 &osd->eth.tx_unicast,
875 &nsd->eth.tx_unicast);
876 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
877 I40E_GLPRT_MPTCL(hw->port),
878 pf->stat_offsets_loaded,
879 &osd->eth.tx_multicast,
880 &nsd->eth.tx_multicast);
881 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
882 I40E_GLPRT_BPTCL(hw->port),
883 pf->stat_offsets_loaded,
884 &osd->eth.tx_broadcast,
885 &nsd->eth.tx_broadcast);
886
887 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
888 pf->stat_offsets_loaded,
889 &osd->tx_dropped_link_down,
890 &nsd->tx_dropped_link_down);
891
892 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
893 pf->stat_offsets_loaded,
894 &osd->crc_errors, &nsd->crc_errors);
895
896 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
897 pf->stat_offsets_loaded,
898 &osd->illegal_bytes, &nsd->illegal_bytes);
899
900 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
901 pf->stat_offsets_loaded,
902 &osd->mac_local_faults,
903 &nsd->mac_local_faults);
904 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
905 pf->stat_offsets_loaded,
906 &osd->mac_remote_faults,
907 &nsd->mac_remote_faults);
908
909 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
910 pf->stat_offsets_loaded,
911 &osd->rx_length_errors,
912 &nsd->rx_length_errors);
913
914 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
915 pf->stat_offsets_loaded,
916 &osd->link_xon_rx, &nsd->link_xon_rx);
917 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
918 pf->stat_offsets_loaded,
919 &osd->link_xon_tx, &nsd->link_xon_tx);
920 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
921 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
922 pf->stat_offsets_loaded,
923 &osd->link_xoff_tx, &nsd->link_xoff_tx);
924
925 for (i = 0; i < 8; i++) {
926 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
927 pf->stat_offsets_loaded,
928 &osd->priority_xon_rx[i],
929 &nsd->priority_xon_rx[i]);
930 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
931 pf->stat_offsets_loaded,
932 &osd->priority_xon_tx[i],
933 &nsd->priority_xon_tx[i]);
934 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
935 pf->stat_offsets_loaded,
936 &osd->priority_xoff_tx[i],
937 &nsd->priority_xoff_tx[i]);
938 i40e_stat_update32(hw,
939 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
940 pf->stat_offsets_loaded,
941 &osd->priority_xon_2_xoff[i],
942 &nsd->priority_xon_2_xoff[i]);
943 }
944
945 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
946 I40E_GLPRT_PRC64L(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->rx_size_64, &nsd->rx_size_64);
949 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
950 I40E_GLPRT_PRC127L(hw->port),
951 pf->stat_offsets_loaded,
952 &osd->rx_size_127, &nsd->rx_size_127);
953 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
954 I40E_GLPRT_PRC255L(hw->port),
955 pf->stat_offsets_loaded,
956 &osd->rx_size_255, &nsd->rx_size_255);
957 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
958 I40E_GLPRT_PRC511L(hw->port),
959 pf->stat_offsets_loaded,
960 &osd->rx_size_511, &nsd->rx_size_511);
961 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
962 I40E_GLPRT_PRC1023L(hw->port),
963 pf->stat_offsets_loaded,
964 &osd->rx_size_1023, &nsd->rx_size_1023);
965 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
966 I40E_GLPRT_PRC1522L(hw->port),
967 pf->stat_offsets_loaded,
968 &osd->rx_size_1522, &nsd->rx_size_1522);
969 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
970 I40E_GLPRT_PRC9522L(hw->port),
971 pf->stat_offsets_loaded,
972 &osd->rx_size_big, &nsd->rx_size_big);
973
974 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
975 I40E_GLPRT_PTC64L(hw->port),
976 pf->stat_offsets_loaded,
977 &osd->tx_size_64, &nsd->tx_size_64);
978 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
979 I40E_GLPRT_PTC127L(hw->port),
980 pf->stat_offsets_loaded,
981 &osd->tx_size_127, &nsd->tx_size_127);
982 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
983 I40E_GLPRT_PTC255L(hw->port),
984 pf->stat_offsets_loaded,
985 &osd->tx_size_255, &nsd->tx_size_255);
986 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
987 I40E_GLPRT_PTC511L(hw->port),
988 pf->stat_offsets_loaded,
989 &osd->tx_size_511, &nsd->tx_size_511);
990 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
991 I40E_GLPRT_PTC1023L(hw->port),
992 pf->stat_offsets_loaded,
993 &osd->tx_size_1023, &nsd->tx_size_1023);
994 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
995 I40E_GLPRT_PTC1522L(hw->port),
996 pf->stat_offsets_loaded,
997 &osd->tx_size_1522, &nsd->tx_size_1522);
998 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
999 I40E_GLPRT_PTC9522L(hw->port),
1000 pf->stat_offsets_loaded,
1001 &osd->tx_size_big, &nsd->tx_size_big);
1002
1003 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1004 pf->stat_offsets_loaded,
1005 &osd->rx_undersize, &nsd->rx_undersize);
1006 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1007 pf->stat_offsets_loaded,
1008 &osd->rx_fragments, &nsd->rx_fragments);
1009 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1010 pf->stat_offsets_loaded,
1011 &osd->rx_oversize, &nsd->rx_oversize);
1012 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1013 pf->stat_offsets_loaded,
1014 &osd->rx_jabber, &nsd->rx_jabber);
1015
1016 /* FDIR stats */
1017 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
1018 pf->stat_offsets_loaded,
1019 &osd->fd_atr_match, &nsd->fd_atr_match);
1020 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
1021 pf->stat_offsets_loaded,
1022 &osd->fd_sb_match, &nsd->fd_sb_match);
1023
1024 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1025 nsd->tx_lpi_status =
1026 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1027 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1028 nsd->rx_lpi_status =
1029 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1030 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1031 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1032 pf->stat_offsets_loaded,
1033 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1034 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1035 pf->stat_offsets_loaded,
1036 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1037
993 pf->stat_offsets_loaded = true; 1038 pf->stat_offsets_loaded = true;
994} 1039}
995 1040
996/** 1041/**
1042 * i40e_update_stats - Update the various statistics counters.
1043 * @vsi: the VSI to be updated
1044 *
1045 * Update the various stats for this VSI and its related entities.
1046 **/
1047void i40e_update_stats(struct i40e_vsi *vsi)
1048{
1049 struct i40e_pf *pf = vsi->back;
1050
1051 if (vsi == pf->vsi[pf->lan_vsi])
1052 i40e_update_pf_stats(pf);
1053
1054 i40e_update_vsi_stats(vsi);
1055}
1056
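
The refactor above splits the old monolithic i40e_update_stats() into VSI and PF halves; both still lean on the i40e_stat_update32/48 helpers, which keep a baseline offset captured at first read so counters stay monotonic across driver reloads, with 48-bit hardware counters needing explicit wrap handling. A simplified standalone model of that delta logic (a sketch, not the driver's code):

    #include <stdio.h>
    #include <stdint.h>

    static void stat_update48(uint64_t new_raw, int offset_loaded,
                              uint64_t *offset, uint64_t *stat)
    {
            if (!offset_loaded)
                    *offset = new_raw;          /* first read sets baseline */
            if (new_raw >= *offset)
                    *stat = new_raw - *offset;
            else                                /* 48-bit counter wrapped */
                    *stat = (new_raw + ((uint64_t)1 << 48)) - *offset;
    }

    int main(void)
    {
            uint64_t off = 0, stat = 0;

            stat_update48(100, 0, &off, &stat); /* driver load: baseline */
            stat_update48(150, 1, &off, &stat);
            printf("delta = %llu\n", (unsigned long long)stat); /* 50 */
            return 0;
    }
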
1057/**
997 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter 1058 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
998 * @vsi: the VSI to be searched 1059 * @vsi: the VSI to be searched
999 * @macaddr: the MAC address 1060 * @macaddr: the MAC address
@@ -1101,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1101} 1162}
1102 1163
1103/** 1164/**
1165 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1166 * @vsi: the PF Main VSI - inappropriate for any other VSI
1167 * @macaddr: the MAC address
1168 **/
1169static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1170{
1171 struct i40e_aqc_remove_macvlan_element_data element;
1172 struct i40e_pf *pf = vsi->back;
1173 i40e_status aq_ret;
1174
1175 /* Only appropriate for the PF main VSI */
1176 if (vsi->type != I40E_VSI_MAIN)
1177 return;
1178
1179 ether_addr_copy(element.mac_addr, macaddr);
1180 element.vlan_tag = 0;
1181 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1182 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1183 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1184 if (aq_ret)
1185 dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
1186}
1187
1188/**
1104 * i40e_add_filter - Add a mac/vlan filter to the VSI 1189 * i40e_add_filter - Add a mac/vlan filter to the VSI
1105 * @vsi: the VSI to be searched 1190 * @vsi: the VSI to be searched
1106 * @macaddr: the MAC address 1191 * @macaddr: the MAC address
@@ -1125,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1125 if (!f) 1210 if (!f)
1126 goto add_filter_out; 1211 goto add_filter_out;
1127 1212
1128 memcpy(f->macaddr, macaddr, ETH_ALEN);
1213 ether_addr_copy(f->macaddr, macaddr);
1129 f->vlan = vlan; 1214 f->vlan = vlan;
1130 f->changed = true; 1215 f->changed = true;
1131 1216
@@ -1249,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1249 return -EADDRNOTAVAIL; 1334 return -EADDRNOTAVAIL;
1250 } 1335 }
1251 1336
1252 memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
1337 ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
1253 } 1338 }
1254 1339
1255 /* In order to be sure to not drop any packets, add the new address 1340 /* In order to be sure to not drop any packets, add the new address
@@ -1263,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1263 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); 1348 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
1264 i40e_sync_vsi_filters(vsi); 1349 i40e_sync_vsi_filters(vsi);
1265 1350
1266 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1351 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1267 1352
1268 return 0; 1353 return 0;
1269} 1354}
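
The memcpy(..., ETH_ALEN) conversions in this file use ether_addr_copy() from <linux/etherdevice.h>, which copies exactly six bytes and requires both arguments to be 16-bit aligned so the compiler can emit 4+2-byte moves. A fragment for illustration (kernel types, not standalone):

    u8 dst[ETH_ALEN] __aligned(2);
    u8 src[ETH_ALEN] __aligned(2) = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

    ether_addr_copy(dst, src);  /* behaves like memcpy(dst, src, ETH_ALEN) */
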
@@ -1313,7 +1398,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1313 vsi->tc_config.numtc = numtc; 1398 vsi->tc_config.numtc = numtc;
1314 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1399 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1315 /* Number of queues per enabled TC */ 1400 /* Number of queues per enabled TC */
1316 num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
1401 num_tc_qps = vsi->alloc_queue_pairs/numtc;
1317 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1402 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1318 1403
1319 /* Setup queue offset/count for all TCs for given VSI */ 1404 /* Setup queue offset/count for all TCs for given VSI */
@@ -1520,8 +1605,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1520 cmd_flags = 0; 1605 cmd_flags = 0;
1521 1606
1522 /* add to delete list */ 1607 /* add to delete list */
1523 memcpy(del_list[num_del].mac_addr,
1524 f->macaddr, ETH_ALEN);
1608 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1525 del_list[num_del].vlan_tag = 1609 del_list[num_del].vlan_tag =
1526 cpu_to_le16((u16)(f->vlan == 1610 cpu_to_le16((u16)(f->vlan ==
1527 I40E_VLAN_ANY ? 0 : f->vlan)); 1611 I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1542,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1542 num_del = 0; 1626 num_del = 0;
1543 memset(del_list, 0, sizeof(*del_list)); 1627 memset(del_list, 0, sizeof(*del_list));
1544 1628
1545 if (aq_ret)
1629 if (aq_ret &&
1630 pf->hw.aq.asq_last_status !=
1631 I40E_AQ_RC_ENOENT)
1546 dev_info(&pf->pdev->dev, 1632 dev_info(&pf->pdev->dev,
1547 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1633 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1548 aq_ret, 1634 aq_ret,
@@ -1554,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1554 del_list, num_del, NULL); 1640 del_list, num_del, NULL);
1555 num_del = 0; 1641 num_del = 0;
1556 1642
1557 if (aq_ret)
1643 if (aq_ret &&
1644 pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1558 dev_info(&pf->pdev->dev, 1645 dev_info(&pf->pdev->dev,
1559 "ignoring delete macvlan error, err %d, aq_err %d\n", 1646 "ignoring delete macvlan error, err %d, aq_err %d\n",
1560 aq_ret, pf->hw.aq.asq_last_status); 1647 aq_ret, pf->hw.aq.asq_last_status);
@@ -1583,8 +1670,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1583 cmd_flags = 0; 1670 cmd_flags = 0;
1584 1671
1585 /* add to add array */ 1672 /* add to add array */
1586 memcpy(add_list[num_add].mac_addr,
1587 f->macaddr, ETH_ALEN);
1673 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1588 add_list[num_add].vlan_tag = 1674 add_list[num_add].vlan_tag =
1589 cpu_to_le16( 1675 cpu_to_le16(
1590 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan)); 1676 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1681,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1681 return; 1767 return;
1682 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 1768 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1683 1769
1684 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 1770 for (v = 0; v < pf->num_alloc_vsi; v++) {
1685 if (pf->vsi[v] && 1771 if (pf->vsi[v] &&
1686 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) 1772 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1687 i40e_sync_vsi_filters(pf->vsi[v]); 1773 i40e_sync_vsi_filters(pf->vsi[v]);
@@ -1698,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1698static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 1784static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1699{ 1785{
1700 struct i40e_netdev_priv *np = netdev_priv(netdev); 1786 struct i40e_netdev_priv *np = netdev_priv(netdev);
1701 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 1787 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1702 struct i40e_vsi *vsi = np->vsi; 1788 struct i40e_vsi *vsi = np->vsi;
1703 1789
1704 /* MTU < 68 is an error and causes problems on some kernels */ 1790 /* MTU < 68 is an error and causes problems on some kernels */
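
The MTU check now reserves headroom for an 802.1Q tag as well, so the largest on-wire frame is new_mtu plus 14 bytes of Ethernet header, 4 bytes of FCS and 4 bytes of VLAN tag. A quick standalone check of the arithmetic:

    #include <stdio.h>

    #define ETH_HLEN    14  /* destination MAC + source MAC + ethertype */
    #define ETH_FCS_LEN  4  /* frame check sequence */
    #define VLAN_HLEN    4  /* one 802.1Q tag */

    int main(void)
    {
        int new_mtu = 1500;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        /* A standard 1500-byte MTU now budgets for a 1522-byte frame. */
        printf("max_frame = %d\n", max_frame);
        return 0;
    }
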
@@ -2312,6 +2398,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
2312 rx_ctx.crcstrip = 1; 2398 rx_ctx.crcstrip = 1;
2313 rx_ctx.l2tsel = 1; 2399 rx_ctx.l2tsel = 1;
2314 rx_ctx.showiv = 1; 2400 rx_ctx.showiv = 1;
2401 /* set the prefena field to 1 because the manual says to */
2402 rx_ctx.prefena = 1;
2315 2403
2316 /* clear the context in the HMC */ 2404 /* clear the context in the HMC */
2317 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2405 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2413,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2413 **/ 2501 **/
2414static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2502static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2415{ 2503{
2504 struct i40e_ring *tx_ring, *rx_ring;
2416 u16 qoffset, qcount; 2505 u16 qoffset, qcount;
2417 int i, n; 2506 int i, n;
2418 2507
@@ -2426,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2426 qoffset = vsi->tc_config.tc_info[n].qoffset; 2515 qoffset = vsi->tc_config.tc_info[n].qoffset;
2427 qcount = vsi->tc_config.tc_info[n].qcount; 2516 qcount = vsi->tc_config.tc_info[n].qcount;
2428 for (i = qoffset; i < (qoffset + qcount); i++) { 2517 for (i = qoffset; i < (qoffset + qcount); i++) {
2429 struct i40e_ring *rx_ring = vsi->rx_rings[i]; 2518 rx_ring = vsi->rx_rings[i];
2430 struct i40e_ring *tx_ring = vsi->tx_rings[i]; 2519 tx_ring = vsi->tx_rings[i];
2431 rx_ring->dcb_tc = n; 2520 rx_ring->dcb_tc = n;
2432 tx_ring->dcb_tc = n; 2521 tx_ring->dcb_tc = n;
2433 } 2522 }
@@ -2565,7 +2654,6 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2565 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2654 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2566 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2655 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2567 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | 2656 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
2568 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2569 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2657 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2570 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2658 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2571 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2659 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2733,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2733 &q_vector->affinity_mask); 2821 &q_vector->affinity_mask);
2734 } 2822 }
2735 2823
2824 vsi->irqs_ready = true;
2736 return 0; 2825 return 0;
2737 2826
2738free_queue_irqs: 2827free_queue_irqs:
@@ -3152,6 +3241,12 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3152 3241
3153 pf_q = vsi->base_queue; 3242 pf_q = vsi->base_queue;
3154 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3243 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3244
3245 /* warn the TX unit of coming changes */
3246 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3247 if (!enable)
3248 udelay(10);
3249
3155 for (j = 0; j < 50; j++) { 3250 for (j = 0; j < 50; j++) {
3156 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3251 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3157 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3252 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
@@ -3160,9 +3255,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3160 usleep_range(1000, 2000); 3255 usleep_range(1000, 2000);
3161 } 3256 }
3162 /* Skip if the queue is already in the requested state */ 3257 /* Skip if the queue is already in the requested state */
3163 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3258 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3164 continue;
3165 if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3166 continue; 3259 continue;
3167 3260
3168 /* turn on/off the queue */ 3261 /* turn on/off the queue */
@@ -3178,13 +3271,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3178 /* wait for the change to finish */ 3271 /* wait for the change to finish */
3179 for (j = 0; j < 10; j++) { 3272 for (j = 0; j < 10; j++) {
3180 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3273 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3181 if (enable) { 3274 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3182 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3275 break;
3183 break;
3184 } else {
3185 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3186 break;
3187 }
3188 3276
3189 udelay(10); 3277 udelay(10);
3190 } 3278 }
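
Here and in the Rx hunks below, the old if (enable) { ... } else { ... } ladders collapse into a single comparison: the double negation normalizes the masked status bit to 0 or 1 so it can be compared directly against the requested bool. A standalone illustration of the idiom (the mask value is hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define QENA_STAT_MASK 0x4 /* hypothetical "queue enabled" status bit */

    /* True when the queue's current state already matches the request. */
    static bool state_matches(unsigned int reg, bool enable)
    {
        return enable == !!(reg & QENA_STAT_MASK);
    }

    int main(void)
    {
        printf("%d\n", state_matches(0x4, true));  /* 1: enabled, want enabled */
        printf("%d\n", state_matches(0x0, true));  /* 0: disabled, want enabled */
        printf("%d\n", state_matches(0x0, false)); /* 1: disabled, want disabled */
        return 0;
    }
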
@@ -3223,15 +3311,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3223 usleep_range(1000, 2000); 3311 usleep_range(1000, 2000);
3224 } 3312 }
3225 3313
3226 if (enable) { 3314 /* Skip if the queue is already in the requested state */
3227 /* is STAT set ? */ 3315 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3228 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3316 continue;
3229 continue;
3230 } else {
3231 /* is !STAT set ? */
3232 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3233 continue;
3234 }
3235 3317
3236 /* turn on/off the queue */ 3318 /* turn on/off the queue */
3237 if (enable) 3319 if (enable)
@@ -3244,13 +3326,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3244 for (j = 0; j < 10; j++) { 3326 for (j = 0; j < 10; j++) {
3245 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3327 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3246 3328
3247 if (enable) { 3329 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3248 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3330 break;
3249 break;
3250 } else {
3251 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3252 break;
3253 }
3254 3331
3255 udelay(10); 3332 udelay(10);
3256 } 3333 }
@@ -3304,6 +3381,10 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3304 if (!vsi->q_vectors) 3381 if (!vsi->q_vectors)
3305 return; 3382 return;
3306 3383
3384 if (!vsi->irqs_ready)
3385 return;
3386
3387 vsi->irqs_ready = false;
3307 for (i = 0; i < vsi->num_q_vectors; i++) { 3388 for (i = 0; i < vsi->num_q_vectors; i++) {
3308 u16 vector = i + base; 3389 u16 vector = i + base;
3309 3390
@@ -3476,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3476 int i; 3557 int i;
3477 3558
3478 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3559 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3479 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 3560 for (i = 0; i < pf->num_alloc_vsi; i++)
3480 if (pf->vsi[i]) 3561 if (pf->vsi[i])
3481 i40e_vsi_free_q_vectors(pf->vsi[i]); 3562 i40e_vsi_free_q_vectors(pf->vsi[i]);
3482 i40e_reset_interrupt_capability(pf); 3563 i40e_reset_interrupt_capability(pf);
@@ -3513,6 +3594,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3513} 3594}
3514 3595
3515/** 3596/**
3597 * i40e_vsi_close - Shut down a VSI
3598 * @vsi: the vsi to be quelled
3599 **/
3600static void i40e_vsi_close(struct i40e_vsi *vsi)
3601{
3602 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3603 i40e_down(vsi);
3604 i40e_vsi_free_irq(vsi);
3605 i40e_vsi_free_tx_resources(vsi);
3606 i40e_vsi_free_rx_resources(vsi);
3607}
3608
3609/**
3516 * i40e_quiesce_vsi - Pause a given VSI 3610 * i40e_quiesce_vsi - Pause a given VSI
3517 * @vsi: the VSI being paused 3611 * @vsi: the VSI being paused
3518 **/ 3612 **/
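
The new i40e_vsi_close() centralizes teardown that was previously duplicated in i40e_close(), i40e_quiesce_vsi() and i40e_vsi_release(); test_and_set_bit() makes the DOWN transition atomic, so only the first caller actually brings the VSI down while the resource frees still run. A standalone sketch of the idempotent-close pattern, using the GCC/Clang __atomic_test_and_set builtin in place of the kernel's test_and_set_bit():

    #include <stdio.h>

    static char down_flag; /* stands in for the __I40E_DOWN bit in vsi->state */

    static void vsi_close_sketch(void)
    {
        /* Atomically set the flag; only the caller that flips 0 -> 1
         * performs the actual shutdown work. */
        if (!__atomic_test_and_set(&down_flag, __ATOMIC_SEQ_CST))
            printf("bringing VSI down\n");
        printf("freeing IRQs and ring resources\n");
    }

    int main(void)
    {
        vsi_close_sketch(); /* first call does the shutdown */
        vsi_close_sketch(); /* already down: skips the i40e_down() step */
        return 0;
    }
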
@@ -3525,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3525 if (vsi->netdev && netif_running(vsi->netdev)) { 3619 if (vsi->netdev && netif_running(vsi->netdev)) {
3526 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3620 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3527 } else { 3621 } else {
3528 set_bit(__I40E_DOWN, &vsi->state); 3622 i40e_vsi_close(vsi);
3529 i40e_down(vsi);
3530 } 3623 }
3531} 3624}
3532 3625
@@ -3543,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3543 if (vsi->netdev && netif_running(vsi->netdev)) 3636 if (vsi->netdev && netif_running(vsi->netdev))
3544 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3637 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3545 else 3638 else
3546 i40e_up(vsi); /* this clears the DOWN bit */ 3639 i40e_vsi_open(vsi); /* this clears the DOWN bit */
3547} 3640}
3548 3641
3549/** 3642/**
@@ -3554,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3554{ 3647{
3555 int v; 3648 int v;
3556 3649
3557 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3650 for (v = 0; v < pf->num_alloc_vsi; v++) {
3558 if (pf->vsi[v]) 3651 if (pf->vsi[v])
3559 i40e_quiesce_vsi(pf->vsi[v]); 3652 i40e_quiesce_vsi(pf->vsi[v]);
3560 } 3653 }
@@ -3568,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3568{ 3661{
3569 int v; 3662 int v;
3570 3663
3571 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3664 for (v = 0; v < pf->num_alloc_vsi; v++) {
3572 if (pf->vsi[v]) 3665 if (pf->vsi[v])
3573 i40e_unquiesce_vsi(pf->vsi[v]); 3666 i40e_unquiesce_vsi(pf->vsi[v]);
3574 } 3667 }
@@ -4009,7 +4102,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4009 } 4102 }
4010 4103
4011 /* Update each VSI */ 4104 /* Update each VSI */
4012 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4105 for (v = 0; v < pf->num_alloc_vsi; v++) {
4013 if (!pf->vsi[v]) 4106 if (!pf->vsi[v])
4014 continue; 4107 continue;
4015 4108
@@ -4028,6 +4121,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4028 pf->vsi[v]->seid); 4121 pf->vsi[v]->seid);
4029 /* Will try to configure as many components */ 4122 /* Will try to configure as many components */
4030 } else { 4123 } else {
4124 /* Re-configure VSI vectors based on updated TC map */
4125 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4031 if (pf->vsi[v]->netdev) 4126 if (pf->vsi[v]->netdev)
4032 i40e_dcbnl_set_all(pf->vsi[v]); 4127 i40e_dcbnl_set_all(pf->vsi[v]);
4033 } 4128 }
@@ -4065,14 +4160,69 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
4065 /* When status is not DISABLED then DCBX in FW */ 4160 /* When status is not DISABLED then DCBX in FW */
4066 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | 4161 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4067 DCB_CAP_DCBX_VER_IEEE; 4162 DCB_CAP_DCBX_VER_IEEE;
4068 pf->flags |= I40E_FLAG_DCB_ENABLED; 4163
4164 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4165 /* Enable DCB tagging only when more than one TC */
4166 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4167 pf->flags |= I40E_FLAG_DCB_ENABLED;
4069 } 4168 }
4169 } else {
4170 dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
4171 pf->hw.aq.asq_last_status);
4070 } 4172 }
4071 4173
4072out: 4174out:
4073 return err; 4175 return err;
4074} 4176}
4075#endif /* CONFIG_I40E_DCB */ 4177#endif /* CONFIG_I40E_DCB */
4178#define SPEED_SIZE 14
4179#define FC_SIZE 8
4180/**
4181 * i40e_print_link_message - print link up or down
4182 * @vsi: the VSI for which link needs a message
4183 */
4184static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4185{
4186 char speed[SPEED_SIZE] = "Unknown";
4187 char fc[FC_SIZE] = "RX/TX";
4188
4189 if (!isup) {
4190 netdev_info(vsi->netdev, "NIC Link is Down\n");
4191 return;
4192 }
4193
4194 switch (vsi->back->hw.phy.link_info.link_speed) {
4195 case I40E_LINK_SPEED_40GB:
4196 strncpy(speed, "40 Gbps", SPEED_SIZE);
4197 break;
4198 case I40E_LINK_SPEED_10GB:
4199 strncpy(speed, "10 Gbps", SPEED_SIZE);
4200 break;
4201 case I40E_LINK_SPEED_1GB:
4202 strncpy(speed, "1000 Mbps", SPEED_SIZE);
4203 break;
4204 default:
4205 break;
4206 }
4207
4208 switch (vsi->back->hw.fc.current_mode) {
4209 case I40E_FC_FULL:
4210 strncpy(fc, "RX/TX", FC_SIZE);
4211 break;
4212 case I40E_FC_TX_PAUSE:
4213 strncpy(fc, "TX", FC_SIZE);
4214 break;
4215 case I40E_FC_RX_PAUSE:
4216 strncpy(fc, "RX", FC_SIZE);
4217 break;
4218 default:
4219 strncpy(fc, "None", FC_SIZE);
4220 break;
4221 }
4222
4223 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4224 speed, fc);
4225}
4076 4226
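
i40e_print_link_message() maps the PHY's link-speed code and the current flow-control mode to fixed strings before emitting a single consolidated link message. A standalone sketch of the speed mapping, with hypothetical enum values in place of the I40E_LINK_SPEED_* codes:

    #include <stdio.h>
    #include <string.h>

    #define SPEED_SIZE 14

    /* Hypothetical stand-ins for the hardware's link-speed codes. */
    enum link_speed { SPEED_1GB, SPEED_10GB, SPEED_40GB, SPEED_UNKNOWN };

    static void print_link_up(enum link_speed ls)
    {
        char speed[SPEED_SIZE] = "Unknown";

        switch (ls) {
        case SPEED_40GB:
            strncpy(speed, "40 Gbps", SPEED_SIZE);
            break;
        case SPEED_10GB:
            strncpy(speed, "10 Gbps", SPEED_SIZE);
            break;
        case SPEED_1GB:
            strncpy(speed, "1000 Mbps", SPEED_SIZE);
            break;
        default:
            break; /* leave "Unknown" in place */
        }
        printf("NIC Link is Up %s Full Duplex\n", speed);
    }

    int main(void)
    {
        print_link_up(SPEED_40GB);
        print_link_up(SPEED_UNKNOWN);
        return 0;
    }
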
4077/** 4227/**
4078 * i40e_up_complete - Finish the last steps of bringing up a connection 4228 * i40e_up_complete - Finish the last steps of bringing up a connection
@@ -4099,11 +4249,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
4099 4249
4100 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && 4250 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4101 (vsi->netdev)) { 4251 (vsi->netdev)) {
4102 netdev_info(vsi->netdev, "NIC Link is Up\n"); 4252 i40e_print_link_message(vsi, true);
4103 netif_tx_start_all_queues(vsi->netdev); 4253 netif_tx_start_all_queues(vsi->netdev);
4104 netif_carrier_on(vsi->netdev); 4254 netif_carrier_on(vsi->netdev);
4105 } else if (vsi->netdev) { 4255 } else if (vsi->netdev) {
4106 netdev_info(vsi->netdev, "NIC Link is Down\n"); 4256 i40e_print_link_message(vsi, false);
4107 } 4257 }
4108 4258
4109 /* replay FDIR SB filters */ 4259 /* replay FDIR SB filters */
@@ -4309,24 +4459,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
4309 if (err) 4459 if (err)
4310 goto err_setup_rx; 4460 goto err_setup_rx;
4311 4461
4312 if (!vsi->netdev) { 4462 if (vsi->netdev) {
4313 err = EINVAL; 4463 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4314 goto err_setup_rx; 4464 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4315 } 4465 err = i40e_vsi_request_irq(vsi, int_name);
4316 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 4466 if (err)
4317 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 4467 goto err_setup_rx;
4318 err = i40e_vsi_request_irq(vsi, int_name);
4319 if (err)
4320 goto err_setup_rx;
4321 4468
4322 /* Notify the stack of the actual queue counts. */ 4469 /* Notify the stack of the actual queue counts. */
4323 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); 4470 err = netif_set_real_num_tx_queues(vsi->netdev,
4324 if (err) 4471 vsi->num_queue_pairs);
4325 goto err_set_queues; 4472 if (err)
4473 goto err_set_queues;
4326 4474
4327 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); 4475 err = netif_set_real_num_rx_queues(vsi->netdev,
4328 if (err) 4476 vsi->num_queue_pairs);
4329 goto err_set_queues; 4477 if (err)
4478 goto err_set_queues;
4479
4480 } else if (vsi->type == I40E_VSI_FDIR) {
4481 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4482 dev_driver_string(&pf->pdev->dev));
4483 err = i40e_vsi_request_irq(vsi, int_name);
4484 } else {
4485 err = -EINVAL;
4486 goto err_setup_rx;
4487 }
4330 4488
4331 err = i40e_up_complete(vsi); 4489 err = i40e_up_complete(vsi);
4332 if (err) 4490 if (err)
@@ -4383,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)
4383 struct i40e_netdev_priv *np = netdev_priv(netdev); 4541 struct i40e_netdev_priv *np = netdev_priv(netdev);
4384 struct i40e_vsi *vsi = np->vsi; 4542 struct i40e_vsi *vsi = np->vsi;
4385 4543
4386 if (test_and_set_bit(__I40E_DOWN, &vsi->state)) 4544 i40e_vsi_close(vsi);
4387 return 0;
4388
4389 i40e_down(vsi);
4390 i40e_vsi_free_irq(vsi);
4391
4392 i40e_vsi_free_tx_resources(vsi);
4393 i40e_vsi_free_rx_resources(vsi);
4394 4545
4395 return 0; 4546 return 0;
4396} 4547}
@@ -4410,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4410 4561
4411 WARN_ON(in_interrupt()); 4562 WARN_ON(in_interrupt());
4412 4563
4564 if (i40e_check_asq_alive(&pf->hw))
4565 i40e_vc_notify_reset(pf);
4566
4413 /* do the biggest reset indicated */ 4567 /* do the biggest reset indicated */
4414 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { 4568 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4415 4569
@@ -4475,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4475 /* Find the VSI(s) that requested a re-init */ 4629 /* Find the VSI(s) that requested a re-init */
4476 dev_info(&pf->pdev->dev, 4630 dev_info(&pf->pdev->dev,
4477 "VSI reinit requested\n"); 4631 "VSI reinit requested\n");
4478 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4632 for (v = 0; v < pf->num_alloc_vsi; v++) {
4479 struct i40e_vsi *vsi = pf->vsi[v]; 4633 struct i40e_vsi *vsi = pf->vsi[v];
4480 if (vsi != NULL && 4634 if (vsi != NULL &&
4481 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 4635 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4565,6 +4719,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
4565 int ret = 0; 4719 int ret = 0;
4566 u8 type; 4720 u8 type;
4567 4721
4722 /* Not DCB capable or capability disabled */
4723 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
4724 return ret;
4725
4568 /* Ignore if event is not for Nearest Bridge */ 4726 /* Ignore if event is not for Nearest Bridge */
4569 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) 4727 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
4570 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 4728 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
@@ -4606,6 +4764,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
4606 if (!need_reconfig) 4764 if (!need_reconfig)
4607 goto exit; 4765 goto exit;
4608 4766
4767 /* Enable DCB tagging only when more than one TC */
4768 if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
4769 pf->flags |= I40E_FLAG_DCB_ENABLED;
4770 else
4771 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
4772
4609 /* Reconfiguration needed quiesce all VSIs */ 4773 /* Reconfiguration needed quiesce all VSIs */
4610 i40e_pf_quiesce_all_vsi(pf); 4774 i40e_pf_quiesce_all_vsi(pf);
4611 4775
@@ -4709,8 +4873,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
4709 (pf->flags & I40E_FLAG_FD_SB_ENABLED)) 4873 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4710 return; 4874 return;
4711 fcnt_prog = i40e_get_current_fd_count(pf); 4875 fcnt_prog = i40e_get_current_fd_count(pf);
4712 fcnt_avail = pf->hw.fdir_shared_filter_count + 4876 fcnt_avail = i40e_get_fd_cnt_all(pf);
4713 pf->fdir_pf_filter_count;
4714 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { 4877 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
4715 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 4878 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
4716 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { 4879 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -4803,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4803 i40e_veb_link_event(pf->veb[i], link_up); 4966 i40e_veb_link_event(pf->veb[i], link_up);
4804 4967
4805 /* ... now the local VSIs */ 4968 /* ... now the local VSIs */
4806 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4969 for (i = 0; i < pf->num_alloc_vsi; i++)
4807 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 4970 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4808 i40e_vsi_link_event(pf->vsi[i], link_up); 4971 i40e_vsi_link_event(pf->vsi[i], link_up);
4809} 4972}
@@ -4821,10 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)
4821 4984
4822 if (new_link == old_link) 4985 if (new_link == old_link)
4823 return; 4986 return;
4824
4825 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) 4987 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4826 netdev_info(pf->vsi[pf->lan_vsi]->netdev, 4988 i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
4827 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4828 4989
4829 /* Notify the base of the switch tree connected to 4990 /* Notify the base of the switch tree connected to
4830 * the link. Floating VEBs are not notified. 4991 * the link. Floating VEBs are not notified.
@@ -4862,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
4862 * for each q_vector 5023 * for each q_vector
4863 * force an interrupt 5024 * force an interrupt
4864 */ 5025 */
4865 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 5026 for (v = 0; v < pf->num_alloc_vsi; v++) {
4866 struct i40e_vsi *vsi = pf->vsi[v]; 5027 struct i40e_vsi *vsi = pf->vsi[v];
4867 int armed = 0; 5028 int armed = 0;
4868 5029
@@ -4912,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
4912 /* Update the stats for active netdevs so the network stack 5073 /* Update the stats for active netdevs so the network stack
4913 * can look at updated numbers whenever it cares to 5074 * can look at updated numbers whenever it cares to
4914 */ 5075 */
4915 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 5076 for (i = 0; i < pf->num_alloc_vsi; i++)
4916 if (pf->vsi[i] && pf->vsi[i]->netdev) 5077 if (pf->vsi[i] && pf->vsi[i]->netdev)
4917 i40e_update_stats(pf->vsi[i]); 5078 i40e_update_stats(pf->vsi[i]);
4918 5079
@@ -5018,11 +5179,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5018 u16 pending, i = 0; 5179 u16 pending, i = 0;
5019 i40e_status ret; 5180 i40e_status ret;
5020 u16 opcode; 5181 u16 opcode;
5182 u32 oldval;
5021 u32 val; 5183 u32 val;
5022 5184
5023 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)) 5185 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
5024 return; 5186 return;
5025 5187
5188 /* check for error indications */
5189 val = rd32(&pf->hw, pf->hw.aq.arq.len);
5190 oldval = val;
5191 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5192 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5193 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5194 }
5195 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5196 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5197 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5198 }
5199 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5200 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5201 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5202 }
5203 if (oldval != val)
5204 wr32(&pf->hw, pf->hw.aq.arq.len, val);
5205
5206 val = rd32(&pf->hw, pf->hw.aq.asq.len);
5207 oldval = val;
5208 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5209 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5210 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5211 }
5212 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5213 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5214 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5215 }
5216 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5217 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5218 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5219 }
5220 if (oldval != val)
5221 wr32(&pf->hw, pf->hw.aq.asq.len, val);
5222
5026 event.msg_size = I40E_MAX_AQ_BUF_SIZE; 5223 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
5027 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 5224 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
5028 if (!event.msg_buf) 5225 if (!event.msg_buf)
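
The new block at the top of the adminq subtask is a read-modify-write pass over the ARQ/ASQ length registers: read once, log and clear each asserted error bit in a local copy, and write back only if something actually changed. A standalone sketch of the pattern, with hypothetical bit masks and a faked register:

    #include <stdio.h>
    #include <stdint.h>

    #define ARQ_VF_ERR   0x1u /* hypothetical error bits */
    #define ARQ_OVERFLOW 0x2u
    #define ARQ_CRITICAL 0x4u

    static uint32_t fake_reg = ARQ_OVERFLOW | ARQ_CRITICAL;

    static uint32_t rd32(void) { return fake_reg; }
    static void wr32(uint32_t v) { fake_reg = v; }

    int main(void)
    {
        uint32_t val = rd32();
        uint32_t oldval = val;

        if (val & ARQ_VF_ERR) {
            printf("ARQ VF Error detected\n");
            val &= ~ARQ_VF_ERR;
        }
        if (val & ARQ_OVERFLOW) {
            printf("ARQ Overflow Error detected\n");
            val &= ~ARQ_OVERFLOW;
        }
        if (val & ARQ_CRITICAL) {
            printf("ARQ Critical Error detected\n");
            val &= ~ARQ_CRITICAL;
        }
        if (oldval != val)   /* skip the register write when nothing was set */
            wr32(val);

        printf("register now 0x%x\n", fake_reg);
        return 0;
    }
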
@@ -5128,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
5128 int ret; 5325 int ret;
5129 5326
5130 /* build VSI that owns this VEB, temporarily attached to base VEB */ 5327 /* build VSI that owns this VEB, temporarily attached to base VEB */
5131 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) { 5328 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
5132 if (pf->vsi[v] && 5329 if (pf->vsi[v] &&
5133 pf->vsi[v]->veb_idx == veb->idx && 5330 pf->vsi[v]->veb_idx == veb->idx &&
5134 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 5331 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5158,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
5158 goto end_reconstitute; 5355 goto end_reconstitute;
5159 5356
5160 /* create the remaining VSIs attached to this VEB */ 5357 /* create the remaining VSIs attached to this VEB */
5161 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 5358 for (v = 0; v < pf->num_alloc_vsi; v++) {
5162 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 5359 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
5163 continue; 5360 continue;
5164 5361
@@ -5226,9 +5423,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
5226 } 5423 }
5227 } while (err); 5424 } while (err);
5228 5425
5229 /* increment MSI-X count because current FW skips one */
5230 pf->hw.func_caps.num_msix_vectors++;
5231
5232 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || 5426 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
5233 (pf->hw.aq.fw_maj_ver < 2)) { 5427 (pf->hw.aq.fw_maj_ver < 2)) {
5234 pf->hw.func_caps.num_msix_vectors++; 5428 pf->hw.func_caps.num_msix_vectors++;
@@ -5267,15 +5461,14 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
5267static void i40e_fdir_sb_setup(struct i40e_pf *pf) 5461static void i40e_fdir_sb_setup(struct i40e_pf *pf)
5268{ 5462{
5269 struct i40e_vsi *vsi; 5463 struct i40e_vsi *vsi;
5270 bool new_vsi = false; 5464 int i;
5271 int err, i;
5272 5465
5273 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 5466 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
5274 return; 5467 return;
5275 5468
5276 /* find existing VSI and see if it needs configuring */ 5469 /* find existing VSI and see if it needs configuring */
5277 vsi = NULL; 5470 vsi = NULL;
5278 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5471 for (i = 0; i < pf->num_alloc_vsi; i++) {
5279 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5472 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5280 vsi = pf->vsi[i]; 5473 vsi = pf->vsi[i];
5281 break; 5474 break;
@@ -5288,47 +5481,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
5288 pf->vsi[pf->lan_vsi]->seid, 0); 5481 pf->vsi[pf->lan_vsi]->seid, 0);
5289 if (!vsi) { 5482 if (!vsi) {
5290 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 5483 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
5291 goto err_vsi; 5484 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
5485 return;
5292 } 5486 }
5293 new_vsi = true;
5294 }
5295 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
5296
5297 err = i40e_vsi_setup_tx_resources(vsi);
5298 if (err)
5299 goto err_setup_tx;
5300 err = i40e_vsi_setup_rx_resources(vsi);
5301 if (err)
5302 goto err_setup_rx;
5303
5304 if (new_vsi) {
5305 char int_name[IFNAMSIZ + 9];
5306 err = i40e_vsi_configure(vsi);
5307 if (err)
5308 goto err_setup_rx;
5309 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
5310 dev_driver_string(&pf->pdev->dev));
5311 err = i40e_vsi_request_irq(vsi, int_name);
5312 if (err)
5313 goto err_setup_rx;
5314 err = i40e_up_complete(vsi);
5315 if (err)
5316 goto err_up_complete;
5317 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
5318 } 5487 }
5319 5488
5320 return; 5489 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
5321
5322err_up_complete:
5323 i40e_down(vsi);
5324 i40e_vsi_free_irq(vsi);
5325err_setup_rx:
5326 i40e_vsi_free_rx_resources(vsi);
5327err_setup_tx:
5328 i40e_vsi_free_tx_resources(vsi);
5329err_vsi:
5330 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
5331 i40e_vsi_clear(vsi);
5332} 5490}
5333 5491
5334/** 5492/**
@@ -5340,7 +5498,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
5340 int i; 5498 int i;
5341 5499
5342 i40e_fdir_filter_exit(pf); 5500 i40e_fdir_filter_exit(pf);
5343 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5501 for (i = 0; i < pf->num_alloc_vsi; i++) {
5344 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5502 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5345 i40e_vsi_release(pf->vsi[i]); 5503 i40e_vsi_release(pf->vsi[i]);
5346 break; 5504 break;
@@ -5357,7 +5515,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
5357static int i40e_prep_for_reset(struct i40e_pf *pf) 5515static int i40e_prep_for_reset(struct i40e_pf *pf)
5358{ 5516{
5359 struct i40e_hw *hw = &pf->hw; 5517 struct i40e_hw *hw = &pf->hw;
5360 i40e_status ret; 5518 i40e_status ret = 0;
5361 u32 v; 5519 u32 v;
5362 5520
5363 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 5521 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
@@ -5366,13 +5524,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
5366 5524
5367 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 5525 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
5368 5526
5369 if (i40e_check_asq_alive(hw))
5370 i40e_vc_notify_reset(pf);
5371
5372 /* quiesce the VSIs and their queues that are not already DOWN */ 5527 /* quiesce the VSIs and their queues that are not already DOWN */
5373 i40e_pf_quiesce_all_vsi(pf); 5528 i40e_pf_quiesce_all_vsi(pf);
5374 5529
5375 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 5530 for (v = 0; v < pf->num_alloc_vsi; v++) {
5376 if (pf->vsi[v]) 5531 if (pf->vsi[v])
5377 pf->vsi[v]->seid = 0; 5532 pf->vsi[v]->seid = 0;
5378 } 5533 }
@@ -5380,22 +5535,40 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
5380 i40e_shutdown_adminq(&pf->hw); 5535 i40e_shutdown_adminq(&pf->hw);
5381 5536
5382 /* call shutdown HMC */ 5537 /* call shutdown HMC */
5383 ret = i40e_shutdown_lan_hmc(hw); 5538 if (hw->hmc.hmc_obj) {
5384 if (ret) { 5539 ret = i40e_shutdown_lan_hmc(hw);
5385 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); 5540 if (ret) {
5386 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 5541 dev_warn(&pf->pdev->dev,
5542 "shutdown_lan_hmc failed: %d\n", ret);
5543 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5544 }
5387 } 5545 }
5388 return ret; 5546 return ret;
5389} 5547}
5390 5548
5391/** 5549/**
5550 * i40e_send_version - update firmware with driver version
5551 * @pf: PF struct
5552 */
5553static void i40e_send_version(struct i40e_pf *pf)
5554{
5555 struct i40e_driver_version dv;
5556
5557 dv.major_version = DRV_VERSION_MAJOR;
5558 dv.minor_version = DRV_VERSION_MINOR;
5559 dv.build_version = DRV_VERSION_BUILD;
5560 dv.subbuild_version = 0;
5561 strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
5562 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5563}
5564
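
i40e_send_version() factors the "tell the firmware who we are" step into one helper so that probe and reset-rebuild share it. A standalone sketch of filling such a version record (the struct layout and version values here are illustrative, not the driver's):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical mirror of a driver-version record sent to firmware. */
    struct drv_version {
        unsigned char major_version;
        unsigned char minor_version;
        unsigned char build_version;
        unsigned char subbuild_version;
        char driver_string[32];
    };

    int main(void)
    {
        struct drv_version dv = {
            .major_version = 0,
            .minor_version = 4,
            .build_version = 10,
            .subbuild_version = 0,
        };

        strncpy(dv.driver_string, "0.4.10-k", sizeof(dv.driver_string) - 1);
        printf("reporting %u.%u.%u (%s) to firmware\n",
               dv.major_version, dv.minor_version, dv.build_version,
               dv.driver_string);
        return 0;
    }
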
5565/**
5392 * i40e_reset_and_rebuild - reset and rebuild using a saved config 5566 * i40e_reset_and_rebuild - reset and rebuild using a saved config
5393 * @pf: board private structure 5567 * @pf: board private structure
5394 * @reinit: if the Main VSI needs to re-initialized. 5568 * @reinit: if the Main VSI needs to re-initialized.
5395 **/ 5569 **/
5396static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) 5570static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5397{ 5571{
5398 struct i40e_driver_version dv;
5399 struct i40e_hw *hw = &pf->hw; 5572 struct i40e_hw *hw = &pf->hw;
5400 i40e_status ret; 5573 i40e_status ret;
5401 u32 v; 5574 u32 v;
@@ -5405,8 +5578,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5405 * because the reset will make them disappear. 5578 * because the reset will make them disappear.
5406 */ 5579 */
5407 ret = i40e_pf_reset(hw); 5580 ret = i40e_pf_reset(hw);
5408 if (ret) 5581 if (ret) {
5409 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); 5582 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
5583 goto end_core_reset;
5584 }
5410 pf->pfr_count++; 5585 pf->pfr_count++;
5411 5586
5412 if (test_bit(__I40E_DOWN, &pf->state)) 5587 if (test_bit(__I40E_DOWN, &pf->state))
@@ -5426,6 +5601,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5426 i40e_verify_eeprom(pf); 5601 i40e_verify_eeprom(pf);
5427 } 5602 }
5428 5603
5604 i40e_clear_pxe_mode(hw);
5429 ret = i40e_get_capabilities(pf); 5605 ret = i40e_get_capabilities(pf);
5430 if (ret) { 5606 if (ret) {
5431 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", 5607 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5526,13 +5702,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5526 } 5702 }
5527 5703
5528 /* tell the firmware that we're starting */ 5704 /* tell the firmware that we're starting */
5529 dv.major_version = DRV_VERSION_MAJOR; 5705 i40e_send_version(pf);
5530 dv.minor_version = DRV_VERSION_MINOR;
5531 dv.build_version = DRV_VERSION_BUILD;
5532 dv.subbuild_version = 0;
5533 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5534
5535 dev_info(&pf->pdev->dev, "reset complete\n");
5536 5706
5537end_core_reset: 5707end_core_reset:
5538 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 5708 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5642,7 +5812,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
5642 **/ 5812 **/
5643static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) 5813static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5644{ 5814{
5645 const int vxlan_hdr_qwords = 4;
5646 struct i40e_hw *hw = &pf->hw; 5815 struct i40e_hw *hw = &pf->hw;
5647 i40e_status ret; 5816 i40e_status ret;
5648 u8 filter_index; 5817 u8 filter_index;
@@ -5660,7 +5829,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5660 port = pf->vxlan_ports[i]; 5829 port = pf->vxlan_ports[i];
5661 ret = port ? 5830 ret = port ?
5662 i40e_aq_add_udp_tunnel(hw, ntohs(port), 5831 i40e_aq_add_udp_tunnel(hw, ntohs(port),
5663 vxlan_hdr_qwords,
5664 I40E_AQC_TUNNEL_TYPE_VXLAN, 5832 I40E_AQC_TUNNEL_TYPE_VXLAN,
5665 &filter_index, NULL) 5833 &filter_index, NULL)
5666 : i40e_aq_del_udp_tunnel(hw, i, NULL); 5834 : i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -5839,15 +6007,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5839 * find next empty vsi slot, looping back around if necessary 6007 * find next empty vsi slot, looping back around if necessary
5840 */ 6008 */
5841 i = pf->next_vsi; 6009 i = pf->next_vsi;
5842 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i]) 6010 while (i < pf->num_alloc_vsi && pf->vsi[i])
5843 i++; 6011 i++;
5844 if (i >= pf->hw.func_caps.num_vsis) { 6012 if (i >= pf->num_alloc_vsi) {
5845 i = 0; 6013 i = 0;
5846 while (i < pf->next_vsi && pf->vsi[i]) 6014 while (i < pf->next_vsi && pf->vsi[i])
5847 i++; 6015 i++;
5848 } 6016 }
5849 6017
5850 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) { 6018 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
5851 vsi_idx = i; /* Found one! */ 6019 vsi_idx = i; /* Found one! */
5852 } else { 6020 } else {
5853 ret = -ENODEV; 6021 ret = -ENODEV;
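
The VSI allocator scans from next_vsi to the end of the (now num_alloc_vsi-sized) table and wraps around to the front, so low-numbered slots freed earlier are eventually reused. A standalone sketch of the wrap-around scan:

    #include <stdio.h>

    #define NUM_SLOTS 8

    static void *slots[NUM_SLOTS];

    /* Find a free slot starting at 'next', wrapping to the front;
     * returns the index, or -1 if the table is full. */
    static int find_free_slot(int next)
    {
        int i = next;

        while (i < NUM_SLOTS && slots[i])
            i++;
        if (i >= NUM_SLOTS) {
            i = 0;
            while (i < next && slots[i])
                i++;
        }
        return (i < NUM_SLOTS && !slots[i]) ? i : -1;
    }

    int main(void)
    {
        slots[6] = slots[7] = (void *)1; /* tail occupied: forces the wrap */
        printf("free slot: %d\n", find_free_slot(6)); /* prints 0 */
        return 0;
    }
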
@@ -5870,6 +6038,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5870 vsi->netdev_registered = false; 6038 vsi->netdev_registered = false;
5871 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; 6039 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5872 INIT_LIST_HEAD(&vsi->mac_filter_list); 6040 INIT_LIST_HEAD(&vsi->mac_filter_list);
6041 vsi->irqs_ready = false;
5873 6042
5874 ret = i40e_set_num_rings_in_vsi(vsi); 6043 ret = i40e_set_num_rings_in_vsi(vsi);
5875 if (ret) 6044 if (ret)
@@ -5987,14 +6156,12 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5987 **/ 6156 **/
5988static int i40e_alloc_rings(struct i40e_vsi *vsi) 6157static int i40e_alloc_rings(struct i40e_vsi *vsi)
5989{ 6158{
6159 struct i40e_ring *tx_ring, *rx_ring;
5990 struct i40e_pf *pf = vsi->back; 6160 struct i40e_pf *pf = vsi->back;
5991 int i; 6161 int i;
5992 6162
5993 /* Set basic values in the rings to be used later during open() */ 6163 /* Set basic values in the rings to be used later during open() */
5994 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 6164 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5995 struct i40e_ring *tx_ring;
5996 struct i40e_ring *rx_ring;
5997
5998 /* allocate space for both Tx and Rx in one shot */ 6165 /* allocate space for both Tx and Rx in one shot */
5999 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 6166 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6000 if (!tx_ring) 6167 if (!tx_ring)
@@ -6052,8 +6219,6 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6052 vectors = 0; 6219 vectors = 0;
6053 } 6220 }
6054 6221
6055 pf->num_msix_entries = vectors;
6056
6057 return vectors; 6222 return vectors;
6058} 6223}
6059 6224
@@ -6107,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
6107 for (i = 0; i < v_budget; i++) 6272 for (i = 0; i < v_budget; i++)
6108 pf->msix_entries[i].entry = i; 6273 pf->msix_entries[i].entry = i;
6109 vec = i40e_reserve_msix_vectors(pf, v_budget); 6274 vec = i40e_reserve_msix_vectors(pf, v_budget);
6275
6276 if (vec != v_budget) {
6277 /* If we have limited resources, we will start with no vectors
6278 * for the special features and then allocate vectors to some
6279 * of these features based on the policy and at the end disable
6280 * the features that did not get any vectors.
6281 */
6282 pf->num_vmdq_msix = 0;
6283 }
6284
6110 if (vec < I40E_MIN_MSIX) { 6285 if (vec < I40E_MIN_MSIX) {
6111 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 6286 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
6112 kfree(pf->msix_entries); 6287 kfree(pf->msix_entries);
@@ -6115,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
6115 6290
6116 } else if (vec == I40E_MIN_MSIX) { 6291 } else if (vec == I40E_MIN_MSIX) {
6117 /* Adjust for minimal MSIX use */ 6292 /* Adjust for minimal MSIX use */
6118 dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
6119 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6120 pf->num_vmdq_vsis = 0; 6293 pf->num_vmdq_vsis = 0;
6121 pf->num_vmdq_qps = 0; 6294 pf->num_vmdq_qps = 0;
6122 pf->num_vmdq_msix = 0;
6123 pf->num_lan_qps = 1; 6295 pf->num_lan_qps = 1;
6124 pf->num_lan_msix = 1; 6296 pf->num_lan_msix = 1;
6125 6297
6126 } else if (vec != v_budget) { 6298 } else if (vec != v_budget) {
6299 /* reserve the misc vector */
6300 vec--;
6301
6127 /* Scale vector usage down */ 6302 /* Scale vector usage down */
6128 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 6303 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
6129 vec--; /* reserve the misc vector */ 6304 pf->num_vmdq_vsis = 1;
6130 6305
6131 /* partition out the remaining vectors */ 6306 /* partition out the remaining vectors */
6132 switch (vec) { 6307 switch (vec) {
6133 case 2: 6308 case 2:
6134 pf->num_vmdq_vsis = 1;
6135 pf->num_lan_msix = 1; 6309 pf->num_lan_msix = 1;
6136 break; 6310 break;
6137 case 3: 6311 case 3:
6138 pf->num_vmdq_vsis = 1;
6139 pf->num_lan_msix = 2; 6312 pf->num_lan_msix = 2;
6140 break; 6313 break;
6141 default: 6314 default:
@@ -6147,6 +6320,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
6147 } 6320 }
6148 } 6321 }
6149 6322
6323 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
6324 (pf->num_vmdq_msix == 0)) {
6325 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
6326 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6327 }
6150 return err; 6328 return err;
6151} 6329}
6152 6330
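
When the OS grants fewer MSI-X vectors than the budget, the fallback path now zeroes the VMDq allocation up front, reserves one vector for miscellaneous causes, pins VMDq to a single vector and VSI, and disables VMDq entirely if it ends up with no vectors. A standalone sketch of that degradation policy (the counts and the default split are simplified assumptions, not the driver's exact policy):

    #include <stdio.h>

    struct vec_policy { int misc, vmdq_msix, vmdq_vsis, lan_msix; };

    /* Degrade gracefully when 'granted' < 'budget' MSI-X vectors. */
    static struct vec_policy partition(int granted, int budget)
    {
        struct vec_policy p = { .misc = 1, .vmdq_msix = 8 /* hypothetical default */ };

        if (granted != budget)
            p.vmdq_msix = 0;            /* extras start with no vectors */

        if (granted == 2) {             /* bare minimum: misc + one LAN */
            p.lan_msix = 1;
        } else if (granted != budget) {
            int vec = granted - 1;      /* reserve the misc vector */

            p.vmdq_msix = 1;            /* force VMDq to one vector */
            p.vmdq_vsis = 1;
            switch (vec) {
            case 2: p.lan_msix = 1; break;
            case 3: p.lan_msix = 2; break;
            default:                    /* hypothetical split for larger counts */
                p.lan_msix = vec - 1;
                break;
            }
        }
        if (p.vmdq_msix == 0)
            printf("VMDq disabled, not enough MSI-X vectors\n");
        return p;
    }

    int main(void)
    {
        struct vec_policy p = partition(4, 16);
        printf("lan_msix=%d vmdq_vsis=%d\n", p.lan_msix, p.vmdq_vsis);
        return 0;
    }
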
@@ -6171,7 +6349,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
6171 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 6349 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
6172 if (vsi->netdev) 6350 if (vsi->netdev)
6173 netif_napi_add(vsi->netdev, &q_vector->napi, 6351 netif_napi_add(vsi->netdev, &q_vector->napi,
6174 i40e_napi_poll, vsi->work_limit); 6352 i40e_napi_poll, NAPI_POLL_WEIGHT);
6175 6353
6176 q_vector->rx.latency_range = I40E_LOW_LATENCY; 6354 q_vector->rx.latency_range = I40E_LOW_LATENCY;
6177 q_vector->tx.latency_range = I40E_LOW_LATENCY; 6355 q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -6231,7 +6409,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6231 if (err) { 6409 if (err) {
6232 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 6410 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
6233 I40E_FLAG_RSS_ENABLED | 6411 I40E_FLAG_RSS_ENABLED |
6234 I40E_FLAG_DCB_ENABLED | 6412 I40E_FLAG_DCB_CAPABLE |
6235 I40E_FLAG_SRIOV_ENABLED | 6413 I40E_FLAG_SRIOV_ENABLED |
6236 I40E_FLAG_FD_SB_ENABLED | 6414 I40E_FLAG_FD_SB_ENABLED |
6237 I40E_FLAG_FD_ATR_ENABLED | 6415 I40E_FLAG_FD_ATR_ENABLED |
@@ -6364,7 +6542,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
6364 return 0; 6542 return 0;
6365 6543
6366 queue_count = min_t(int, queue_count, pf->rss_size_max); 6544 queue_count = min_t(int, queue_count, pf->rss_size_max);
6367 queue_count = rounddown_pow_of_two(queue_count);
6368 6545
6369 if (queue_count != pf->rss_size) { 6546 if (queue_count != pf->rss_size) {
6370 i40e_prep_for_reset(pf); 6547 i40e_prep_for_reset(pf);
@@ -6407,6 +6584,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
6407 I40E_FLAG_MSIX_ENABLED | 6584 I40E_FLAG_MSIX_ENABLED |
6408 I40E_FLAG_RX_1BUF_ENABLED; 6585 I40E_FLAG_RX_1BUF_ENABLED;
6409 6586
6587 /* Set default ITR */
6588 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
6589 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
6590
6410 /* Depending on PF configurations, it is possible that the RSS 6591 /* Depending on PF configurations, it is possible that the RSS
6411 * maximum might end up larger than the available queues 6592 * maximum might end up larger than the available queues
6412 */ 6593 */
@@ -6416,7 +6597,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
6416 if (pf->hw.func_caps.rss) { 6597 if (pf->hw.func_caps.rss) {
6417 pf->flags |= I40E_FLAG_RSS_ENABLED; 6598 pf->flags |= I40E_FLAG_RSS_ENABLED;
6418 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); 6599 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
6419 pf->rss_size = rounddown_pow_of_two(pf->rss_size);
6420 } else { 6600 } else {
6421 pf->rss_size = 1; 6601 pf->rss_size = 1;
6422 } 6602 }
@@ -6432,8 +6612,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
6432 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 6612 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
6433 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 6613 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6434 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 6614 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
6615 /* Setup a counter for fd_atr per pf */
6616 pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
6435 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { 6617 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
6436 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 6618 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6619 /* Setup a counter for fd_sb per pf */
6620 pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
6437 } else { 6621 } else {
6438 dev_info(&pf->pdev->dev, 6622 dev_info(&pf->pdev->dev,
6439 "Flow Director Sideband mode Disabled in MFP mode\n"); 6623 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -6649,6 +6833,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
6649} 6833}
6650 6834
6651#endif 6835#endif
6836#ifdef HAVE_FDB_OPS
6837#ifdef USE_CONST_DEV_UC_CHAR
6838static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
6839 struct net_device *dev,
6840 const unsigned char *addr,
6841 u16 flags)
6842#else
6843static int i40e_ndo_fdb_add(struct ndmsg *ndm,
6844 struct net_device *dev,
6845 unsigned char *addr,
6846 u16 flags)
6847#endif
6848{
6849 struct i40e_netdev_priv *np = netdev_priv(dev);
6850 struct i40e_pf *pf = np->vsi->back;
6851 int err = 0;
6852
6853 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
6854 return -EOPNOTSUPP;
6855
6856 /* Hardware does not support aging addresses so if a
6857 * ndm_state is given only allow permanent addresses
6858 */
6859 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6860 netdev_info(dev, "FDB only supports static addresses\n");
6861 return -EINVAL;
6862 }
6863
6864 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6865 err = dev_uc_add_excl(dev, addr);
6866 else if (is_multicast_ether_addr(addr))
6867 err = dev_mc_add_excl(dev, addr);
6868 else
6869 err = -EINVAL;
6870
6871 /* Only return duplicate errors if NLM_F_EXCL is set */
6872 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6873 err = 0;
6874
6875 return err;
6876}
6877
6878#ifndef USE_DEFAULT_FDB_DEL_DUMP
6879#ifdef USE_CONST_DEV_UC_CHAR
6880static int i40e_ndo_fdb_del(struct ndmsg *ndm,
6881 struct net_device *dev,
6882 const unsigned char *addr)
6883#else
6884static int i40e_ndo_fdb_del(struct ndmsg *ndm,
6885 struct net_device *dev,
6886 unsigned char *addr)
6887#endif
6888{
6889 struct i40e_netdev_priv *np = netdev_priv(dev);
6890 struct i40e_pf *pf = np->vsi->back;
6891 int err = -EOPNOTSUPP;
6892
6893 if (ndm->ndm_state & NUD_PERMANENT) {
6894 netdev_info(dev, "FDB only supports static addresses\n");
6895 return -EINVAL;
6896 }
6897
6898 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
6899 if (is_unicast_ether_addr(addr))
6900 err = dev_uc_del(dev, addr);
6901 else if (is_multicast_ether_addr(addr))
6902 err = dev_mc_del(dev, addr);
6903 else
6904 err = -EINVAL;
6905 }
6906
6907 return err;
6908}
6909
6910static int i40e_ndo_fdb_dump(struct sk_buff *skb,
6911 struct netlink_callback *cb,
6912 struct net_device *dev,
6913 int idx)
6914{
6915 struct i40e_netdev_priv *np = netdev_priv(dev);
6916 struct i40e_pf *pf = np->vsi->back;
6917
6918 if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
6919 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6920
6921 return idx;
6922}
6923
6924#endif /* USE_DEFAULT_FDB_DEL_DUMP */
6925#endif /* HAVE_FDB_OPS */
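
The new FDB ops accept only static entries and dispatch on the address class: unicast and link-local addresses go to the device's unicast list, multicast addresses to the multicast list, and anything else is rejected. A standalone sketch of the classification step, using the standard I/G-bit rule for Ethernet addresses:

    #include <stdio.h>

    /* An address is multicast when the I/G bit (LSB of the first octet)
     * is set; everything else is treated as unicast in this sketch. */
    static int is_multicast(const unsigned char *addr) { return addr[0] & 0x01; }

    static const char *classify(const unsigned char *addr)
    {
        if (!is_multicast(addr))
            return "unicast: add to device UC list";
        return "multicast: add to device MC list";
    }

    int main(void)
    {
        const unsigned char uc[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
        const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("%s\n", classify(uc));
        printf("%s\n", classify(mc));
        return 0;
    }
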
6652static const struct net_device_ops i40e_netdev_ops = { 6926static const struct net_device_ops i40e_netdev_ops = {
6653 .ndo_open = i40e_open, 6927 .ndo_open = i40e_open,
6654 .ndo_stop = i40e_close, 6928 .ndo_stop = i40e_close,
@@ -6669,13 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {
6669 .ndo_set_features = i40e_set_features, 6943 .ndo_set_features = i40e_set_features,
6670 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 6944 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
6671 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 6945 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
6672 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, 6946 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
6673 .ndo_get_vf_config = i40e_ndo_get_vf_config, 6947 .ndo_get_vf_config = i40e_ndo_get_vf_config,
6674 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 6948 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
 6949 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
6675#ifdef CONFIG_I40E_VXLAN 6950#ifdef CONFIG_I40E_VXLAN
6676 .ndo_add_vxlan_port = i40e_add_vxlan_port, 6951 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6677 .ndo_del_vxlan_port = i40e_del_vxlan_port, 6952 .ndo_del_vxlan_port = i40e_del_vxlan_port,
6678#endif 6953#endif
6954#ifdef HAVE_FDB_OPS
6955 .ndo_fdb_add = i40e_ndo_fdb_add,
6956#ifndef USE_DEFAULT_FDB_DEL_DUMP
6957 .ndo_fdb_del = i40e_ndo_fdb_del,
6958 .ndo_fdb_dump = i40e_ndo_fdb_dump,
6959#endif
6960#endif
6679}; 6961};
6680 6962
6681/** 6963/**
@@ -6720,16 +7002,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6720 NETIF_F_TSO_ECN | 7002 NETIF_F_TSO_ECN |
6721 NETIF_F_TSO6 | 7003 NETIF_F_TSO6 |
6722 NETIF_F_RXCSUM | 7004 NETIF_F_RXCSUM |
6723 NETIF_F_NTUPLE |
6724 NETIF_F_RXHASH | 7005 NETIF_F_RXHASH |
6725 0; 7006 0;
6726 7007
7008 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
7009 netdev->features |= NETIF_F_NTUPLE;
7010
6727 /* copy netdev features into list of user selectable features */ 7011 /* copy netdev features into list of user selectable features */
6728 netdev->hw_features |= netdev->features; 7012 netdev->hw_features |= netdev->features;
6729 7013
6730 if (vsi->type == I40E_VSI_MAIN) { 7014 if (vsi->type == I40E_VSI_MAIN) {
6731 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 7015 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
6732 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); 7016 ether_addr_copy(mac_addr, hw->mac.perm_addr);
7017 /* The following two steps are necessary to prevent reception
7018 * of tagged packets - by default the NVM loads a MAC-VLAN
7019 * filter that will accept any tagged packet. This is to
7020 * prevent that during normal operations until a specific
7021 * VLAN tag filter has been set.
7022 */
7023 i40e_rm_default_mac_filter(vsi, mac_addr);
7024 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
6733 } else { 7025 } else {
6734 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 7026 /* relate the VSI_VMDQ name to the VSI_MAIN name */
6735 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 7027 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -6739,8 +7031,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6739 } 7031 }
6740 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); 7032 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
6741 7033
6742 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); 7034 ether_addr_copy(netdev->dev_addr, mac_addr);
6743 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN); 7035 ether_addr_copy(netdev->perm_addr, mac_addr);
6744 /* vlan gets same features (except vlan offload) 7036 /* vlan gets same features (except vlan offload)
6745 * after any tweaks for specific VSI types 7037 * after any tweaks for specific VSI types
6746 */ 7038 */
@@ -6772,7 +7064,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
6772 return; 7064 return;
6773 7065
6774 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); 7066 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
6775 return;
6776} 7067}
6777 7068
6778/** 7069/**
@@ -6898,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
6898 7189
6899 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 7190 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
6900 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 7191 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
7192 if (pf->vf[vsi->vf_id].spoofchk) {
7193 ctxt.info.valid_sections |=
7194 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
7195 ctxt.info.sec_flags |=
7196 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
7197 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
7198 }
6901 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7199 /* Setup the VSI tx/rx queue map for TC0 only for now */
6902 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7200 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6903 break; 7201 break;
@@ -6982,11 +7280,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
6982 unregister_netdev(vsi->netdev); 7280 unregister_netdev(vsi->netdev);
6983 } 7281 }
6984 } else { 7282 } else {
6985 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 7283 i40e_vsi_close(vsi);
6986 i40e_down(vsi);
6987 i40e_vsi_free_irq(vsi);
6988 i40e_vsi_free_tx_resources(vsi);
6989 i40e_vsi_free_rx_resources(vsi);
6990 } 7284 }
6991 i40e_vsi_disable_irq(vsi); 7285 i40e_vsi_disable_irq(vsi);
6992 } 7286 }
@@ -7013,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
7013 * the orphan VEBs yet. We'll wait for an explicit remove request 7307 * the orphan VEBs yet. We'll wait for an explicit remove request
7014 * from up the network stack. 7308 * from up the network stack.
7015 */ 7309 */
7016 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7310 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
7017 if (pf->vsi[i] && 7311 if (pf->vsi[i] &&
7018 pf->vsi[i]->uplink_seid == uplink_seid && 7312 pf->vsi[i]->uplink_seid == uplink_seid &&
7019 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 7313 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7192,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
7192 7486
7193 if (!veb && uplink_seid != pf->mac_seid) { 7487 if (!veb && uplink_seid != pf->mac_seid) {
7194 7488
7195 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7489 for (i = 0; i < pf->num_alloc_vsi; i++) {
7196 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 7490 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
7197 vsi = pf->vsi[i]; 7491 vsi = pf->vsi[i];
7198 break; 7492 break;
@@ -7435,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
7435 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 7729 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
7436 * the VEB itself, so don't use (*branch) after this loop. 7730 * the VEB itself, so don't use (*branch) after this loop.
7437 */ 7731 */
7438 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7732 for (i = 0; i < pf->num_alloc_vsi; i++) {
7439 if (!pf->vsi[i]) 7733 if (!pf->vsi[i])
7440 continue; 7734 continue;
7441 if (pf->vsi[i]->uplink_seid == branch_seid && 7735 if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7487,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)
7487 pf = veb->pf; 7781 pf = veb->pf;
7488 7782
7489 /* find the remaining VSI and check for extras */ 7783 /* find the remaining VSI and check for extras */
7490 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7784 for (i = 0; i < pf->num_alloc_vsi; i++) {
7491 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 7785 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
7492 n++; 7786 n++;
7493 vsi = pf->vsi[i]; 7787 vsi = pf->vsi[i];
@@ -7516,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)
7516 7810
7517 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 7811 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
7518 i40e_veb_clear(veb); 7812 i40e_veb_clear(veb);
7519
7520 return;
7521} 7813}
7522 7814
7523/** 7815/**
@@ -7601,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7601 } 7893 }
7602 7894
7603 /* make sure there is such a vsi and uplink */ 7895 /* make sure there is such a vsi and uplink */
7604 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++) 7896 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
7605 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 7897 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
7606 break; 7898 break;
7607 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) { 7899 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
7608 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 7900 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
7609 vsi_seid); 7901 vsi_seid);
7610 return NULL; 7902 return NULL;
@@ -7639,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7639 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 7931 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
7640 if (ret) 7932 if (ret)
7641 goto err_veb; 7933 goto err_veb;
7934 if (vsi_idx == pf->lan_vsi)
7935 pf->lan_veb = veb->idx;
7642 7936
7643 return veb; 7937 return veb;
7644 7938
@@ -7774,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
7774 "header: %d reported %d total\n", 8068 "header: %d reported %d total\n",
7775 num_reported, num_total); 8069 num_reported, num_total);
7776 8070
7777 if (num_reported) {
7778 int sz = sizeof(*sw_config) * num_reported;
7779
7780 kfree(pf->sw_config);
7781 pf->sw_config = kzalloc(sz, GFP_KERNEL);
7782 if (pf->sw_config)
7783 memcpy(pf->sw_config, sw_config, sz);
7784 }
7785
7786 for (i = 0; i < num_reported; i++) { 8071 for (i = 0; i < num_reported; i++) {
7787 struct i40e_aqc_switch_config_element_resp *ele = 8072 struct i40e_aqc_switch_config_element_resp *ele =
7788 &sw_config->element[i]; 8073 &sw_config->element[i];
@@ -7949,9 +8234,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
7949 queues_left = pf->hw.func_caps.num_tx_qp; 8234 queues_left = pf->hw.func_caps.num_tx_qp;
7950 8235
7951 if ((queues_left == 1) || 8236 if ((queues_left == 1) ||
7952 !(pf->flags & I40E_FLAG_MSIX_ENABLED) || 8237 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7953 !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
7954 I40E_FLAG_DCB_ENABLED))) {
7955 /* one qp for PF, no queues for anything else */ 8238 /* one qp for PF, no queues for anything else */
7956 queues_left = 0; 8239 queues_left = 0;
7957 pf->rss_size = pf->num_lan_qps = 1; 8240 pf->rss_size = pf->num_lan_qps = 1;
@@ -7960,14 +8243,27 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
7960 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8243 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
7961 I40E_FLAG_FD_SB_ENABLED | 8244 I40E_FLAG_FD_SB_ENABLED |
7962 I40E_FLAG_FD_ATR_ENABLED | 8245 I40E_FLAG_FD_ATR_ENABLED |
7963 I40E_FLAG_DCB_ENABLED | 8246 I40E_FLAG_DCB_CAPABLE |
7964 I40E_FLAG_SRIOV_ENABLED | 8247 I40E_FLAG_SRIOV_ENABLED |
7965 I40E_FLAG_VMDQ_ENABLED); 8248 I40E_FLAG_VMDQ_ENABLED);
8249 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8250 I40E_FLAG_FD_SB_ENABLED |
8251 I40E_FLAG_FD_ATR_ENABLED |
8252 I40E_FLAG_DCB_CAPABLE))) {
8253 /* one qp for PF */
8254 pf->rss_size = pf->num_lan_qps = 1;
8255 queues_left -= pf->num_lan_qps;
8256
8257 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8258 I40E_FLAG_FD_SB_ENABLED |
8259 I40E_FLAG_FD_ATR_ENABLED |
8260 I40E_FLAG_DCB_ENABLED |
8261 I40E_FLAG_VMDQ_ENABLED);
7966 } else { 8262 } else {
7967 /* Not enough queues for all TCs */ 8263 /* Not enough queues for all TCs */
7968 if ((pf->flags & I40E_FLAG_DCB_ENABLED) && 8264 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
7969 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 8265 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
7970 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 8266 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
7971 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 8267 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
7972 } 8268 }
7973 pf->num_lan_qps = pf->rss_size_max; 8269 pf->num_lan_qps = pf->rss_size_max;
@@ -7998,7 +8294,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
7998 } 8294 }
7999 8295
8000 pf->queues_left = queues_left; 8296 pf->queues_left = queues_left;
8001 return;
8002} 8297}
8003 8298
8004/** 8299/**
@@ -8055,12 +8350,13 @@ static void i40e_print_features(struct i40e_pf *pf)
8055 8350
8056 if (pf->flags & I40E_FLAG_RSS_ENABLED) 8351 if (pf->flags & I40E_FLAG_RSS_ENABLED)
8057 buf += sprintf(buf, "RSS "); 8352 buf += sprintf(buf, "RSS ");
8058 buf += sprintf(buf, "FDir ");
8059 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 8353 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8060 buf += sprintf(buf, "ATR "); 8354 buf += sprintf(buf, "FD_ATR ");
8061 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 8355 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8356 buf += sprintf(buf, "FD_SB ");
8062 buf += sprintf(buf, "NTUPLE "); 8357 buf += sprintf(buf, "NTUPLE ");
8063 if (pf->flags & I40E_FLAG_DCB_ENABLED) 8358 }
8359 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
8064 buf += sprintf(buf, "DCB "); 8360 buf += sprintf(buf, "DCB ");
8065 if (pf->flags & I40E_FLAG_PTP) 8361 if (pf->flags & I40E_FLAG_PTP)
8066 buf += sprintf(buf, "PTP "); 8362 buf += sprintf(buf, "PTP ");
@@ -8083,13 +8379,13 @@ static void i40e_print_features(struct i40e_pf *pf)
8083 **/ 8379 **/
8084static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 8380static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8085{ 8381{
8086 struct i40e_driver_version dv;
8087 struct i40e_pf *pf; 8382 struct i40e_pf *pf;
8088 struct i40e_hw *hw; 8383 struct i40e_hw *hw;
8089 static u16 pfs_found; 8384 static u16 pfs_found;
8090 u16 link_status; 8385 u16 link_status;
8091 int err = 0; 8386 int err = 0;
8092 u32 len; 8387 u32 len;
8388 u32 i;
8093 8389
8094 err = pci_enable_device_mem(pdev); 8390 err = pci_enable_device_mem(pdev);
8095 if (err) 8391 if (err)
@@ -8201,6 +8497,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8201 8497
8202 i40e_verify_eeprom(pf); 8498 i40e_verify_eeprom(pf);
8203 8499
8500 /* Rev 0 hardware was never productized */
8501 if (hw->revision_id < 1)
8502 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
8503
8204 i40e_clear_pxe_mode(hw); 8504 i40e_clear_pxe_mode(hw);
8205 err = i40e_get_capabilities(pf); 8505 err = i40e_get_capabilities(pf);
8206 if (err) 8506 if (err)
@@ -8234,7 +8534,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8234 goto err_mac_addr; 8534 goto err_mac_addr;
8235 } 8535 }
8236 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 8536 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
8237 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); 8537 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
8238 8538
8239 pci_set_drvdata(pdev, pf); 8539 pci_set_drvdata(pdev, pf);
8240 pci_save_state(pdev); 8540 pci_save_state(pdev);
@@ -8242,8 +8542,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8242 err = i40e_init_pf_dcb(pf); 8542 err = i40e_init_pf_dcb(pf);
8243 if (err) { 8543 if (err) {
8244 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); 8544 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
8245 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 8545 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
8246 goto err_init_dcb; 8546 /* Continue without DCB enabled */
8247 } 8547 }
8248#endif /* CONFIG_I40E_DCB */ 8548#endif /* CONFIG_I40E_DCB */
8249 8549
@@ -8264,10 +8564,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8264 i40e_determine_queue_usage(pf); 8564 i40e_determine_queue_usage(pf);
8265 i40e_init_interrupt_scheme(pf); 8565 i40e_init_interrupt_scheme(pf);
8266 8566
8267 /* Set up the *vsi struct based on the number of VSIs in the HW, 8567 /* The number of VSIs reported by the FW is the minimum guaranteed
8268 * and set up our local tracking of the MAIN PF vsi. 8568 * to us; HW supports far more and we share the remaining pool with
8569 * the other PFs. We allocate space for more than the guarantee with
8570 * the understanding that we might not get them all later.
8269 */ 8571 */
8270 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis; 8572 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
8573 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
8574 else
8575 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
8576
8577 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
8578 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
8271 pf->vsi = kzalloc(len, GFP_KERNEL); 8579 pf->vsi = kzalloc(len, GFP_KERNEL);
8272 if (!pf->vsi) { 8580 if (!pf->vsi) {
8273 err = -ENOMEM; 8581 err = -ENOMEM;
@@ -8279,6 +8587,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8279 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 8587 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
8280 goto err_vsis; 8588 goto err_vsis;
8281 } 8589 }
8590 /* if FDIR VSI was set up, start it now */
8591 for (i = 0; i < pf->num_alloc_vsi; i++) {
8592 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
8593 i40e_vsi_open(pf->vsi[i]);
8594 break;
8595 }
8596 }
8282 8597
8283 /* The main driver is (mostly) up and happy. We need to set this state 8598 /* The main driver is (mostly) up and happy. We need to set this state
8284 * before setting up the misc vector or we get a race and the vector 8599 * before setting up the misc vector or we get a race and the vector
@@ -8300,6 +8615,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8300 } 8615 }
8301 } 8616 }
8302 8617
8618#ifdef CONFIG_PCI_IOV
8303 /* prep for VF support */ 8619 /* prep for VF support */
8304 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 8620 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8305 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 8621 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8322,17 +8638,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8322 err); 8638 err);
8323 } 8639 }
8324 } 8640 }
8641#endif /* CONFIG_PCI_IOV */
8325 8642
8326 pfs_found++; 8643 pfs_found++;
8327 8644
8328 i40e_dbg_pf_init(pf); 8645 i40e_dbg_pf_init(pf);
8329 8646
8330 /* tell the firmware that we're starting */ 8647 /* tell the firmware that we're starting */
8331 dv.major_version = DRV_VERSION_MAJOR; 8648 i40e_send_version(pf);
8332 dv.minor_version = DRV_VERSION_MINOR;
8333 dv.build_version = DRV_VERSION_BUILD;
8334 dv.subbuild_version = 0;
8335 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
8336 8649
8337 /* since everything's happy, start the service_task timer */ 8650 /* since everything's happy, start the service_task timer */
8338 mod_timer(&pf->service_timer, 8651 mod_timer(&pf->service_timer,
@@ -8373,9 +8686,6 @@ err_vsis:
8373err_switch_setup: 8686err_switch_setup:
8374 i40e_reset_interrupt_capability(pf); 8687 i40e_reset_interrupt_capability(pf);
8375 del_timer_sync(&pf->service_timer); 8688 del_timer_sync(&pf->service_timer);
8376#ifdef CONFIG_I40E_DCB
8377err_init_dcb:
8378#endif /* CONFIG_I40E_DCB */
8379err_mac_addr: 8689err_mac_addr:
8380err_configure_lan_hmc: 8690err_configure_lan_hmc:
8381 (void)i40e_shutdown_lan_hmc(hw); 8691 (void)i40e_shutdown_lan_hmc(hw);
@@ -8456,10 +8766,13 @@ static void i40e_remove(struct pci_dev *pdev)
8456 } 8766 }
8457 8767
8458 /* shutdown and destroy the HMC */ 8768 /* shutdown and destroy the HMC */
8459 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 8769 if (pf->hw.hmc.hmc_obj) {
8460 if (ret_code) 8770 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
8461 dev_warn(&pdev->dev, 8771 if (ret_code)
8462 "Failed to destroy the HMC resources: %d\n", ret_code); 8772 dev_warn(&pdev->dev,
8773 "Failed to destroy the HMC resources: %d\n",
8774 ret_code);
8775 }
8463 8776
8464 /* shutdown the adminq */ 8777 /* shutdown the adminq */
8465 ret_code = i40e_shutdown_adminq(&pf->hw); 8778 ret_code = i40e_shutdown_adminq(&pf->hw);
@@ -8470,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)
8470 8783
8471 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 8784 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
8472 i40e_clear_interrupt_scheme(pf); 8785 i40e_clear_interrupt_scheme(pf);
8473 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 8786 for (i = 0; i < pf->num_alloc_vsi; i++) {
8474 if (pf->vsi[i]) { 8787 if (pf->vsi[i]) {
8475 i40e_vsi_clear_rings(pf->vsi[i]); 8788 i40e_vsi_clear_rings(pf->vsi[i]);
8476 i40e_vsi_clear(pf->vsi[i]); 8789 i40e_vsi_clear(pf->vsi[i]);
@@ -8485,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)
8485 8798
8486 kfree(pf->qp_pile); 8799 kfree(pf->qp_pile);
8487 kfree(pf->irq_pile); 8800 kfree(pf->irq_pile);
8488 kfree(pf->sw_config);
8489 kfree(pf->vsi); 8801 kfree(pf->vsi);
8490 8802
8491 /* force a PF reset to clean anything leftover */ 8803 /* force a PF reset to clean anything leftover */
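
The num_alloc_vsi conversion above (and in the loops earlier in this file) decouples the size of the pf->vsi[] pointer array from the firmware-reported guarantee. A minimal sketch of the sizing rule, assuming I40E_MIN_VSI_ALLOC is the driver-defined floor (its value lives in i40e.h, outside these hunks):

	/* Sketch: never allocate fewer VSI slots than the driver's floor,
	 * even when firmware guarantees fewer; surplus slots may simply
	 * stay unused if the shared device-wide pool runs out.
	 */
	static u16 vsi_slots_to_alloc(u16 fw_guaranteed, u16 driver_floor)
	{
		return fw_guaranteed < driver_floor ? driver_floor
						    : fw_guaranteed;
	}
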
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e617959..a430699c41d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -70,10 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
70 u16 *fw_major_version, u16 *fw_minor_version, 70 u16 *fw_major_version, u16 *fw_minor_version,
71 u16 *api_major_version, u16 *api_minor_version, 71 u16 *api_major_version, u16 *api_minor_version,
72 struct i40e_asq_cmd_details *cmd_details); 72 struct i40e_asq_cmd_details *cmd_details);
73i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw, 73i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
74 struct i40e_asq_cmd_details *cmd_details); 74 struct i40e_asq_cmd_details *cmd_details);
75i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, 75i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
76 struct i40e_asq_cmd_details *cmd_details); 76 struct i40e_asq_cmd_details *cmd_details);
77i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
78 struct i40e_asq_cmd_details *cmd_details);
77i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 79i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
78 struct i40e_asq_cmd_details *cmd_details); 80 struct i40e_asq_cmd_details *cmd_details);
79i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 81i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -157,8 +159,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
157i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, 159i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
158 struct i40e_asq_cmd_details *cmd_details); 160 struct i40e_asq_cmd_details *cmd_details);
159i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 161i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
160 u16 udp_port, u8 header_len, 162 u16 udp_port, u8 protocol_index,
161 u8 protocol_index, u8 *filter_index, 163 u8 *filter_index,
162 struct i40e_asq_cmd_details *cmd_details); 164 struct i40e_asq_cmd_details *cmd_details);
163i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 165i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
164 struct i40e_asq_cmd_details *cmd_details); 166 struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +169,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
167i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, 169i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
168 u16 flags, u8 *mac_addr, 170 u16 flags, u8 *mac_addr,
169 struct i40e_asq_cmd_details *cmd_details); 171 struct i40e_asq_cmd_details *cmd_details);
172i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
173 u16 seid, u16 credit, u8 max_credit,
174 struct i40e_asq_cmd_details *cmd_details);
170i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 175i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
171 struct i40e_asq_cmd_details *cmd_details); 176 struct i40e_asq_cmd_details *cmd_details);
172i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, 177i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
@@ -216,6 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);
216i40e_status i40e_get_mac_addr(struct i40e_hw *hw, 221i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
217 u8 *mac_addr); 222 u8 *mac_addr);
218i40e_status i40e_validate_mac_addr(u8 *mac_addr); 223i40e_status i40e_validate_mac_addr(u8 *mac_addr);
224void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
219/* prototype for functions used for NVM access */ 225/* prototype for functions used for NVM access */
220i40e_status i40e_init_nvm(struct i40e_hw *hw); 226i40e_status i40e_init_nvm(struct i40e_hw *hw);
221i40e_status i40e_acquire_nvm(struct i40e_hw *hw, 227i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e63720800..101f439acda6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48,7 +48,6 @@
48 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) 48 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
49#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ 49#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
50 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) 50 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
51#define I40E_PTP_TX_TIMEOUT (HZ * 15)
52 51
53/** 52/**
54 * i40e_ptp_read - Read the PHC time from the device 53 * i40e_ptp_read - Read the PHC time from the device
@@ -217,40 +216,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
217} 216}
218 217
219/** 218/**
220 * i40e_ptp_tx_work
221 * @work: pointer to work struct
222 *
223 * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
224 * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
225 * the stack in the skb.
226 */
227static void i40e_ptp_tx_work(struct work_struct *work)
228{
229 struct i40e_pf *pf = container_of(work, struct i40e_pf,
230 ptp_tx_work);
231 struct i40e_hw *hw = &pf->hw;
232 u32 prttsyn_stat_0;
233
234 if (!pf->ptp_tx_skb)
235 return;
236
237 if (time_is_before_jiffies(pf->ptp_tx_start +
238 I40E_PTP_TX_TIMEOUT)) {
239 dev_kfree_skb_any(pf->ptp_tx_skb);
240 pf->ptp_tx_skb = NULL;
241 pf->tx_hwtstamp_timeouts++;
242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
243 return;
244 }
245
246 prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
247 if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
248 i40e_ptp_tx_hwtstamp(pf);
249 else
250 schedule_work(&pf->ptp_tx_work);
251}
252
253/**
254 * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem 219 * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
255 * @ptp: The PTP clock structure 220 * @ptp: The PTP clock structure
256 * @rq: The requested feature to change 221 * @rq: The requested feature to change
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
608 u32 regval; 573 u32 regval;
609 574
610 spin_lock_init(&pf->tmreg_lock); 575 spin_lock_init(&pf->tmreg_lock);
611 INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
612 576
613 dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, 577 dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
614 netdev->name); 578 netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
647 pf->ptp_tx = false; 611 pf->ptp_tx = false;
648 pf->ptp_rx = false; 612 pf->ptp_rx = false;
649 613
650 cancel_work_sync(&pf->ptp_tx_work);
651 if (pf->ptp_tx_skb) { 614 if (pf->ptp_tx_skb) {
652 dev_kfree_skb_any(pf->ptp_tx_skb); 615 dev_kfree_skb_any(pf->ptp_tx_skb);
653 pf->ptp_tx_skb = NULL; 616 pf->ptp_tx_skb = NULL;
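
With i40e_ptp_tx_work() and its 15-second timeout gone, the driver no longer polls PRTTSYN_STAT_0 from a work item; presumably the PFINT_ICR0 TIMESYNC interrupt (handled in i40e_main.c, outside this listing) now triggers the latch read. A hedged sketch of the interrupt-side check, reusing the register and helper names from the deleted work function:

	/* Sketch (assumption): on a timesync interrupt, check whether the
	 * Tx timestamp latch is valid and, if so, deliver it up the stack.
	 */
	u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

	if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
		i40e_ptp_tx_hwtstamp(pf);
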
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1d40f425acf1..947de98500f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1340,8 +1340,6 @@
1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT) 1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT) 1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
1343#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
1344#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
1345#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 1343#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
1346#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) 1344#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
1347#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 1345#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1367,8 +1365,6 @@
1367#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT) 1365#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
1368#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 1366#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
1369#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) 1367#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
1370#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
1371#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
1372#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 1368#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
1373#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) 1369#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
1374#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 1370#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1589,6 +1585,14 @@
1589#define I40E_GLLAN_TSOMSK_M 0x000442DC 1585#define I40E_GLLAN_TSOMSK_M 0x000442DC
1590#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 1586#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
1591#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) 1587#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
1588#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
1589#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
1590#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
1591#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
1592#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
1593#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
1594#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
1595
1592#define I40E_PFLAN_QALLOC 0x001C0400 1596#define I40E_PFLAN_QALLOC 0x001C0400
1593#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 1597#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
1594#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) 1598#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
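
The new I40E_GLLAN_TXPRE_QDIS fields pair with the i40e_pre_tx_queue_cfg() prototype added to i40e_prototype.h above. A sketch of how a queue pre-disable request could be composed from these definitions; the 128-queues-per-register split is an inference from the i=0..11 register range, not something stated in this hunk:

	/* Sketch: latch a set/clear queue-disable request for an absolute
	 * Tx queue index via GLLAN_TXPRE_QDIS.
	 */
	static void sketch_pre_tx_queue_cfg(struct i40e_hw *hw, u32 abs_qidx,
					    bool disable)
	{
		u32 block = abs_qidx / 128;	/* assumed queues per register */
		u32 val = (abs_qidx % 128) << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT;

		val |= disable ? I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK
			       : I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
		wr32(hw, I40E_GLLAN_TXPRE_QDIS(block), val);
	}
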
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..e49f31dbd5d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -24,6 +24,7 @@
24 * 24 *
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27#include <linux/prefetch.h>
27#include "i40e.h" 28#include "i40e.h"
28#include "i40e_prototype.h" 29#include "i40e_prototype.h"
29 30
@@ -61,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
61 62
62 /* find existing FDIR VSI */ 63 /* find existing FDIR VSI */
63 vsi = NULL; 64 vsi = NULL;
64 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 65 for (i = 0; i < pf->num_alloc_vsi; i++)
65 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) 66 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
66 vsi = pf->vsi[i]; 67 vsi = pf->vsi[i];
67 if (!vsi) 68 if (!vsi)
@@ -120,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
120 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; 121 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
121 dcc |= ((u32)fdir_data->cnt_index << 122 dcc |= ((u32)fdir_data->cnt_index <<
122 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 123 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
123 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 124 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
124 } 125 }
125 126
126 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc); 127 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
183 struct iphdr *ip; 184 struct iphdr *ip;
184 bool err = false; 185 bool err = false;
185 int ret; 186 int ret;
186 int i;
187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, 187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, 188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
199 ip->saddr = fd_data->src_ip[0]; 199 ip->saddr = fd_data->src_ip[0];
200 udp->source = fd_data->src_port; 200 udp->source = fd_data->src_port;
201 201
202 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP; 202 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
203 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) { 203 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
204 fd_data->pctype = i; 204 if (ret) {
205 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 205 dev_info(&pf->pdev->dev,
206 206 "Filter command send failed for PCTYPE %d (ret = %d)\n",
207 if (ret) { 207 fd_data->pctype, ret);
208 dev_info(&pf->pdev->dev, 208 err = true;
209 "Filter command send failed for PCTYPE %d (ret = %d)\n", 209 } else {
210 fd_data->pctype, ret); 210 dev_info(&pf->pdev->dev,
211 err = true; 211 "Filter OK for PCTYPE %d (ret = %d)\n",
212 } else { 212 fd_data->pctype, ret);
213 dev_info(&pf->pdev->dev,
214 "Filter OK for PCTYPE %d (ret = %d)\n",
215 fd_data->pctype, ret);
216 }
217 } 213 }
218 214
219 return err ? -EOPNOTSUPP : 0; 215 return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
262 } 258 }
263 } 259 }
264 260
265 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; 261 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
266 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 262 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
267 263
268 if (ret) { 264 if (ret) {
@@ -455,22 +451,20 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455 451
456 /* filter programming failed most likely due to table full */ 452 /* filter programming failed most likely due to table full */
457 fcnt_prog = i40e_get_current_fd_count(pf); 453 fcnt_prog = i40e_get_current_fd_count(pf);
458 fcnt_avail = pf->hw.fdir_shared_filter_count + 454 fcnt_avail = i40e_get_fd_cnt_all(pf);
459 pf->fdir_pf_filter_count;
460
461 /* If ATR is running fcnt_prog can quickly change, 455 /* If ATR is running fcnt_prog can quickly change,
462 * if we are very close to full, it makes sense to disable 456 * if we are very close to full, it makes sense to disable
463 * FD ATR/SB and then re-enable it when there is room. 457 * FD ATR/SB and then re-enable it when there is room.
464 */ 458 */
465 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { 459 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
466 /* Turn off ATR first */ 460 /* Turn off ATR first */
467 if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) { 461 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
468 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 462 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
469 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); 463 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
470 pf->auto_disable_flags |= 464 pf->auto_disable_flags |=
471 I40E_FLAG_FD_ATR_ENABLED; 465 I40E_FLAG_FD_ATR_ENABLED;
472 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 466 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
473 } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) { 467 } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
474 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 468 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
475 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); 469 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
476 pf->auto_disable_flags |= 470 pf->auto_disable_flags |=
@@ -1199,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1199 u32 rx_error, 1193 u32 rx_error,
1200 u16 rx_ptype) 1194 u16 rx_ptype)
1201{ 1195{
1196 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1197 bool ipv4 = false, ipv6 = false;
1202 bool ipv4_tunnel, ipv6_tunnel; 1198 bool ipv4_tunnel, ipv6_tunnel;
1203 __wsum rx_udp_csum; 1199 __wsum rx_udp_csum;
1204 __sum16 csum;
1205 struct iphdr *iph; 1200 struct iphdr *iph;
1201 __sum16 csum;
1206 1202
1207 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && 1203 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1208 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); 1204 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1213,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1213 skb->ip_summed = CHECKSUM_NONE; 1209 skb->ip_summed = CHECKSUM_NONE;
1214 1210
1215 /* Rx csum enabled and ip headers found? */ 1211 /* Rx csum enabled and ip headers found? */
1216 if (!(vsi->netdev->features & NETIF_F_RXCSUM && 1212 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1217 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 1213 return;
1214
1215 /* did the hardware decode the packet and checksum? */
1216 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1217 return;
1218
1219 /* both known and outer_ip must be set for the below code to work */
1220 if (!(decoded.known && decoded.outer_ip))
1218 return; 1221 return;
1219 1222
1223 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1224 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1225 ipv4 = true;
1226 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1227 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1228 ipv6 = true;
1229
1230 if (ipv4 &&
1231 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1232 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1233 goto checksum_fail;
1234
1220 /* likely incorrect csum if alternate IP extension headers found */ 1235 /* likely incorrect csum if alternate IP extension headers found */
1221 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 1236 if (ipv6 &&
1237 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
1238 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
1239 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1240 /* don't increment checksum err here, non-fatal err */
1222 return; 1241 return;
1223 1242
1224 /* IP or L4 or outmost IP checksum error */ 1243 /* there was some L4 error, count error and punt packet to the stack */
1225 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | 1244 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
1226 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | 1245 goto checksum_fail;
1227 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { 1246
1228 vsi->back->hw_csum_rx_error++; 1247 /* handle packets that were not able to be checksummed due
1248 * to arrival speed, in this case the stack can compute
1249 * the csum.
1250 */
1251 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
1229 return; 1252 return;
1230 }
1231 1253
1254 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1255 * it in the driver, hardware does not do it for us.
1256 * Since L3L4P bit was set we assume a valid IHL value (>=5)
 1257	 * so the total length of the IPv4 header is IHL*4 bytes
 1258	 * The UDP_0 bit *may* be set if the *inner* header is UDP
1259 */
1232 if (ipv4_tunnel && 1260 if (ipv4_tunnel &&
1261 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
1233 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 1262 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
1234 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1235 * it in the driver, hardware does not do it for us.
1236 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1237 * so the total length of IPv4 header is IHL*4 bytes
1238 */
1239 skb->transport_header = skb->mac_header + 1263 skb->transport_header = skb->mac_header +
1240 sizeof(struct ethhdr) + 1264 sizeof(struct ethhdr) +
1241 (ip_hdr(skb)->ihl * 4); 1265 (ip_hdr(skb)->ihl * 4);
@@ -1252,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1252 (skb->len - skb_transport_offset(skb)), 1276 (skb->len - skb_transport_offset(skb)),
1253 IPPROTO_UDP, rx_udp_csum); 1277 IPPROTO_UDP, rx_udp_csum);
1254 1278
1255 if (udp_hdr(skb)->check != csum) { 1279 if (udp_hdr(skb)->check != csum)
1256 vsi->back->hw_csum_rx_error++; 1280 goto checksum_fail;
1257 return;
1258 }
1259 } 1281 }
1260 1282
1261 skb->ip_summed = CHECKSUM_UNNECESSARY; 1283 skb->ip_summed = CHECKSUM_UNNECESSARY;
1284
1285 return;
1286
1287checksum_fail:
1288 vsi->back->hw_csum_rx_error++;
1262} 1289}
1263 1290
1264/** 1291/**
@@ -1435,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1435 /* ERR_MASK will only have valid bits if EOP set */ 1462 /* ERR_MASK will only have valid bits if EOP set */
1436 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { 1463 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1437 dev_kfree_skb_any(skb); 1464 dev_kfree_skb_any(skb);
1465 /* TODO: shouldn't we increment a counter indicating the
1466 * drop?
1467 */
1438 goto next_desc; 1468 goto next_desc;
1439 } 1469 }
1440 1470
@@ -1665,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1665 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << 1695 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1666 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; 1696 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1667 1697
1698 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
1699 dtype_cmd |=
1700 ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1701 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
1702
1668 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 1703 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
1669 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 1704 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1670} 1705}
@@ -1825,9 +1860,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1825 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << 1860 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
1826 I40E_TXD_CTX_QW1_CMD_SHIFT; 1861 I40E_TXD_CTX_QW1_CMD_SHIFT;
1827 1862
1828 pf->ptp_tx_start = jiffies;
1829 schedule_work(&pf->ptp_tx_work);
1830
1831 return 1; 1863 return 1;
1832} 1864}
1833 1865
@@ -2179,9 +2211,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2179static int i40e_xmit_descriptor_count(struct sk_buff *skb, 2211static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2180 struct i40e_ring *tx_ring) 2212 struct i40e_ring *tx_ring)
2181{ 2213{
2182#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2183 unsigned int f; 2214 unsigned int f;
2184#endif
2185 int count = 0; 2215 int count = 0;
2186 2216
2187 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 2217 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2190,12 +2220,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2190 * + 1 desc for context descriptor, 2220 * + 1 desc for context descriptor,
2191 * otherwise try next time 2221 * otherwise try next time
2192 */ 2222 */
2193#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2194 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2223 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2195 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2224 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2196#else 2225
2197 count += skb_shinfo(skb)->nr_frags;
2198#endif
2199 count += TXD_USE_COUNT(skb_headlen(skb)); 2226 count += TXD_USE_COUNT(skb_headlen(skb));
2200 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { 2227 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2201 tx_ring->tx_stats.tx_busy++; 2228 tx_ring->tx_stats.tx_busy++;
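
Dropping the PAGE_SIZE conditional in i40e_xmit_descriptor_count() makes the estimate exact on every architecture: each fragment is counted as TXD_USE_COUNT(size) descriptors, where I40E_MAX_DATA_PER_TXD is now 8192 (see i40e_txrx.h below). Worked numbers:

	/* Worked example of the unconditional per-fragment count:
	 *   4 KiB frag:  DIV_ROUND_UP(4096, 8192)  = 1 descriptor
	 *   64 KiB frag: DIV_ROUND_UP(65536, 8192) = 8 descriptors
	 * The removed '#else count += nr_frags' branch could undercount
	 * fragments backed by compound pages larger than MAX_DATA_PER_TXD.
	 */
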
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d5349698e513..0277894fe1c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,7 +27,7 @@
27#ifndef _I40E_TXRX_H_ 27#ifndef _I40E_TXRX_H_
28#define _I40E_TXRX_H_ 28#define _I40E_TXRX_H_
29 29
30/* Interrupt Throttling and Rate Limiting (storm control) Goodies */ 30/* Interrupt Throttling and Rate Limiting Goodies */
31 31
32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ 32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ 33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -69,16 +69,11 @@ enum i40e_dyn_idx_t {
69 69
70/* Supported RSS offloads */ 70/* Supported RSS offloads */
71#define I40E_DEFAULT_RSS_HENA ( \ 71#define I40E_DEFAULT_RSS_HENA ( \
72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
76 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
79 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 76 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
80 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
81 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
82 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
83 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ 78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
84 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 79 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -122,11 +117,11 @@ enum i40e_dyn_idx_t {
122#define i40e_rx_desc i40e_32byte_rx_desc 117#define i40e_rx_desc i40e_32byte_rx_desc
123 118
124#define I40E_MIN_TX_LEN 17 119#define I40E_MIN_TX_LEN 17
125#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */ 120#define I40E_MAX_DATA_PER_TXD 8192
126 121
127/* Tx Descriptors needed, worst case */ 122/* Tx Descriptors needed, worst case */
128#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) 123#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
129#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) 124#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
130 125
131#define I40E_TX_FLAGS_CSUM (u32)(1) 126#define I40E_TX_FLAGS_CSUM (u32)(1)
132#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) 127#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
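
DESC_NEEDED is the stop/wake threshold for the Tx ring; since the per-skb count above is now exact, the static worst case no longer has to scale with PAGE_SIZE. Illustrative arithmetic, assuming MAX_SKB_FRAGS == 17 and 64 KiB pages, with the old I40E_MAX_DATA_PER_TXD of 16383:

	/* Illustrative threshold math (MAX_SKB_FRAGS assumed to be 17):
	 *   old: 17 * DIV_ROUND_UP(65536, 16383) + 4 = 17 * 5 + 4 = 89
	 *   new: 17 + 4 = 21  (fragments plus slack for head, context
	 *                      descriptor and ring gap)
	 */
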
@@ -184,7 +179,6 @@ enum i40e_ring_state_t {
184 __I40E_TX_DETECT_HANG, 179 __I40E_TX_DETECT_HANG,
185 __I40E_HANG_CHECK_ARMED, 180 __I40E_HANG_CHECK_ARMED,
186 __I40E_RX_PS_ENABLED, 181 __I40E_RX_PS_ENABLED,
187 __I40E_RX_LRO_ENABLED,
188 __I40E_RX_16BYTE_DESC_ENABLED, 182 __I40E_RX_16BYTE_DESC_ENABLED,
189}; 183};
190 184
@@ -200,12 +194,6 @@ enum i40e_ring_state_t {
200 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 194 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
201#define clear_check_for_tx_hang(ring) \ 195#define clear_check_for_tx_hang(ring) \
202 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 196 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
203#define ring_is_lro_enabled(ring) \
204 test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
205#define set_ring_lro_enabled(ring) \
206 set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
207#define clear_ring_lro_enabled(ring) \
208 clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
209#define ring_is_16byte_desc_enabled(ring) \ 197#define ring_is_16byte_desc_enabled(ring) \
210 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) 198 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
211#define set_ring_16byte_desc_enabled(ring) \ 199#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968fe557f..9d39ff23c5fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,12 +36,10 @@
36 36
37/* Device IDs */ 37/* Device IDs */
38#define I40E_DEV_ID_SFP_XL710 0x1572 38#define I40E_DEV_ID_SFP_XL710 0x1572
39#define I40E_DEV_ID_SFP_X710 0x1573
40#define I40E_DEV_ID_QEMU 0x1574 39#define I40E_DEV_ID_QEMU 0x1574
41#define I40E_DEV_ID_KX_A 0x157F 40#define I40E_DEV_ID_KX_A 0x157F
42#define I40E_DEV_ID_KX_B 0x1580 41#define I40E_DEV_ID_KX_B 0x1580
43#define I40E_DEV_ID_KX_C 0x1581 42#define I40E_DEV_ID_KX_C 0x1581
44#define I40E_DEV_ID_KX_D 0x1582
45#define I40E_DEV_ID_QSFP_A 0x1583 43#define I40E_DEV_ID_QSFP_A 0x1583
46#define I40E_DEV_ID_QSFP_B 0x1584 44#define I40E_DEV_ID_QSFP_B 0x1584
47#define I40E_DEV_ID_QSFP_C 0x1585 45#define I40E_DEV_ID_QSFP_C 0x1585
@@ -60,8 +58,8 @@
60/* Max default timeout in ms, */ 58/* Max default timeout in ms, */
61#define I40E_MAX_NVM_TIMEOUT 18000 59#define I40E_MAX_NVM_TIMEOUT 18000
62 60
63/* Switch from mc to the 2usec global time (this is the GTIME resolution) */ 61/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
64#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2) 62#define I40E_MS_TO_GTIME(time) ((time) * 1000)
65 63
66/* forward declaration */ 64/* forward declaration */
67struct i40e_hw; 65struct i40e_hw;
@@ -167,6 +165,9 @@ struct i40e_link_status {
167 u8 loopback; 165 u8 loopback;
168 /* is Link Status Event notification to SW enabled */ 166 /* is Link Status Event notification to SW enabled */
169 bool lse_enable; 167 bool lse_enable;
168 u16 max_frame_size;
169 bool crc_enable;
170 u8 pacing;
170}; 171};
171 172
172struct i40e_phy_info { 173struct i40e_phy_info {
@@ -409,6 +410,7 @@ struct i40e_driver_version {
409 u8 minor_version; 410 u8 minor_version;
410 u8 build_version; 411 u8 build_version;
411 u8 subbuild_version; 412 u8 subbuild_version;
413 u8 driver_string[32];
412}; 414};
413 415
414/* RX Descriptors */ 416/* RX Descriptors */
@@ -488,9 +490,6 @@ union i40e_32byte_rx_desc {
488 } wb; /* writeback */ 490 } wb; /* writeback */
489}; 491};
490 492
491#define I40E_RXD_QW1_STATUS_SHIFT 0
492#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
493
494enum i40e_rx_desc_status_bits { 493enum i40e_rx_desc_status_bits {
495 /* Note: These are predefined bit offsets */ 494 /* Note: These are predefined bit offsets */
496 I40E_RX_DESC_STATUS_DD_SHIFT = 0, 495 I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -507,9 +506,14 @@ enum i40e_rx_desc_status_bits {
507 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, 506 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
508 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, 507 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
509 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ 508 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
510 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18 509 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
510 I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
511}; 511};
512 512
513#define I40E_RXD_QW1_STATUS_SHIFT 0
514#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
515 << I40E_RXD_QW1_STATUS_SHIFT)
516
513#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT 517#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
514#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ 518#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
515 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) 519 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
@@ -537,7 +541,8 @@ enum i40e_rx_desc_error_bits {
537 I40E_RX_DESC_ERROR_IPE_SHIFT = 3, 541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
538 I40E_RX_DESC_ERROR_L4E_SHIFT = 4, 542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
539 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, 543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
540 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6 544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
545 I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
541}; 546};
542 547
543enum i40e_rx_desc_error_l3l4e_fcoe_masks { 548enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -658,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
658 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, 663 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
659 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ 664 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
660 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ 665 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
661 I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
662 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, 666 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
663 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, 667 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
664 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, 668 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -862,18 +866,14 @@ struct i40e_filter_program_desc {
862 866
863/* Packet Classifier Types for filters */ 867/* Packet Classifier Types for filters */
864enum i40e_filter_pctype { 868enum i40e_filter_pctype {
865 /* Note: Values 0-28 are reserved for future use */ 869 /* Note: Values 0-30 are reserved for future use */
866 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
867 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
868 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, 870 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
869 I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32, 871 /* Note: Value 32 is reserved for future use */
870 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, 872 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
871 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, 873 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
872 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, 874 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
873 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, 875 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
874 /* Note: Values 37-38 are reserved for future use */ 876 /* Note: Values 37-40 are reserved for future use */
875 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
876 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, 877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42, 878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, 879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -955,6 +955,16 @@ struct i40e_vsi_context {
955 struct i40e_aqc_vsi_properties_data info; 955 struct i40e_aqc_vsi_properties_data info;
956}; 956};
957 957
958struct i40e_veb_context {
959 u16 seid;
960 u16 uplink_seid;
961 u16 veb_number;
962 u16 vebs_allocated;
963 u16 vebs_unallocated;
964 u16 flags;
965 struct i40e_aqc_get_veb_parameters_completion info;
966};
967
958/* Statistics collected by each port, VSI, VEB, and S-channel */ 968/* Statistics collected by each port, VSI, VEB, and S-channel */
959struct i40e_eth_stats { 969struct i40e_eth_stats {
960 u64 rx_bytes; /* gorc */ 970 u64 rx_bytes; /* gorc */
@@ -962,8 +972,6 @@ struct i40e_eth_stats {
962 u64 rx_multicast; /* mprc */ 972 u64 rx_multicast; /* mprc */
963 u64 rx_broadcast; /* bprc */ 973 u64 rx_broadcast; /* bprc */
964 u64 rx_discards; /* rdpc */ 974 u64 rx_discards; /* rdpc */
965 u64 rx_errors; /* repc */
966 u64 rx_missed; /* rmpc */
967 u64 rx_unknown_protocol; /* rupp */ 975 u64 rx_unknown_protocol; /* rupp */
968 u64 tx_bytes; /* gotc */ 976 u64 tx_bytes; /* gotc */
969 u64 tx_unicast; /* uptc */ 977 u64 tx_unicast; /* uptc */
@@ -1015,9 +1023,12 @@ struct i40e_hw_port_stats {
1015 u64 tx_size_big; /* ptc9522 */ 1023 u64 tx_size_big; /* ptc9522 */
1016 u64 mac_short_packet_dropped; /* mspdc */ 1024 u64 mac_short_packet_dropped; /* mspdc */
1017 u64 checksum_error; /* xec */ 1025 u64 checksum_error; /* xec */
1026 /* flow director stats */
1027 u64 fd_atr_match;
1028 u64 fd_sb_match;
1018 /* EEE LPI */ 1029 /* EEE LPI */
1019 bool tx_lpi_status; 1030 u32 tx_lpi_status;
1020 bool rx_lpi_status; 1031 u32 rx_lpi_status;
1021 u64 tx_lpi_count; /* etlpic */ 1032 u64 tx_lpi_count; /* etlpic */
1022 u64 rx_lpi_count; /* erlpic */ 1033 u64 rx_lpi_count; /* erlpic */
1023}; 1034};
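
Two of the i40e_type.h changes are easiest to check numerically: the GTIME conversion (the global timer ticks at 1 us, so the old divide-by-2 was wrong) and the QW1 status mask, which is now derived from the status-bit enum so it cannot drift out of sync with it.

	/* Worked examples:
	 *   I40E_MS_TO_GTIME(10): old (10 * 1000) / 2 = 5000 ticks,
	 *   new 10 * 1000 = 10000 ticks (correct at 1 us resolution).
	 *   I40E_RXD_QW1_STATUS_MASK: with I40E_RX_DESC_STATUS_LAST == 19,
	 *   ((1 << 19) - 1) = 0x7FFFF, versus the old hard-coded 0x7FFF;
	 *   the mask now grows automatically when status bits are added
	 *   before the _LAST sentinel.
	 */
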
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 22a1b69cd646..70951d2edcad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
341 int severity; 341 int severity;
342}; 342};
343 343
344/* The following are TBD, not necessary for LAN functionality.
345 * I40E_VIRTCHNL_OP_FCOE
346 */
347
348/* VF reset states - these are written into the RSTAT register: 344/* VF reset states - these are written into the RSTAT register:
349 * I40E_VFGEN_RSTAT1 on the PF 345 * I40E_VFGEN_RSTAT1 on the PF
350 * I40E_VFGEN_RSTAT on the VF 346 * I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7f7d29..f5b9d2062573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
29/***********************misc routines*****************************/ 29/***********************misc routines*****************************/
30 30
31/** 31/**
32 * i40e_vc_disable_vf
33 * @pf: pointer to the pf info
34 * @vf: pointer to the vf info
35 *
36 * Disable the VF through a SW reset
37 **/
38static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
39{
40 struct i40e_hw *hw = &pf->hw;
41 u32 reg;
42
43 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
44 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
45 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
46 i40e_flush(hw);
47}
48
49/**
32 * i40e_vc_isvalid_vsi_id 50 * i40e_vc_isvalid_vsi_id
33 * @vf: pointer to the vf info 51 * @vf: pointer to the vf info
34 * @vsi_id: vf relative vsi id 52 * @vsi_id: vf relative vsi id
@@ -230,9 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
230 tx_ctx.qlen = info->ring_len; 248 tx_ctx.qlen = info->ring_len;
231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 249 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
232 tx_ctx.rdylist_act = 0; 250 tx_ctx.rdylist_act = 0;
233 tx_ctx.head_wb_ena = 1; 251 tx_ctx.head_wb_ena = info->headwb_enabled;
234 tx_ctx.head_wb_addr = info->dma_ring_addr + 252 tx_ctx.head_wb_addr = info->dma_headwb_addr;
235 (info->ring_len * sizeof(struct i40e_tx_desc));
236 253
237 /* clear the context in the HMC */ 254 /* clear the context in the HMC */
238 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 255 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -336,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
336 rx_ctx.tphhead_ena = 1; 353 rx_ctx.tphhead_ena = 1;
337 rx_ctx.lrxqthresh = 2; 354 rx_ctx.lrxqthresh = 2;
338 rx_ctx.crcstrip = 1; 355 rx_ctx.crcstrip = 1;
356 rx_ctx.prefena = 1;
339 357
340 /* clear the context in the HMC */ 358 /* clear the context in the HMC */
341 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 359 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
416 if (ret) 434 if (ret)
417 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 435 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
418 436
437 /* Set VF bandwidth if specified */
438 if (vf->tx_rate) {
439 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
440 vf->tx_rate / 50, 0, NULL);
441 if (ret)
442 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
443 vf->vf_id, ret);
444 }
445
419error_alloc_vsi_res: 446error_alloc_vsi_res:
420 return ret; 447 return ret;
421} 448}
@@ -815,6 +842,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
815 kfree(pf->vf); 842 kfree(pf->vf);
816 pf->vf = NULL; 843 pf->vf = NULL;
817 844
845 /* This check is for when the driver is unloaded while VFs are
846 * assigned. Setting the number of VFs to 0 through sysfs is caught
847 * before this function ever gets called.
848 */
818 if (!i40e_vfs_are_assigned(pf)) { 849 if (!i40e_vfs_are_assigned(pf)) {
819 pci_disable_sriov(pf->pdev); 850 pci_disable_sriov(pf->pdev);
820 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 851 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -867,6 +898,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
867 ret = -ENOMEM; 898 ret = -ENOMEM;
868 goto err_alloc; 899 goto err_alloc;
869 } 900 }
901 pf->vf = vfs;
870 902
871 /* apply default profile */ 903 /* apply default profile */
872 for (i = 0; i < num_alloc_vfs; i++) { 904 for (i = 0; i < num_alloc_vfs; i++) {
@@ -876,13 +908,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
876 908
877 /* assign default capabilities */ 909 /* assign default capabilities */
878 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 910 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
911 vfs[i].spoofchk = true;
879 /* vf resources get allocated during reset */ 912 /* vf resources get allocated during reset */
880 i40e_reset_vf(&vfs[i], false); 913 i40e_reset_vf(&vfs[i], false);
881 914
882 /* enable vf vplan_qtable mappings */ 915 /* enable vf vplan_qtable mappings */
883 i40e_enable_vf_mappings(&vfs[i]); 916 i40e_enable_vf_mappings(&vfs[i]);
884 } 917 }
885 pf->vf = vfs;
886 pf->num_alloc_vfs = num_alloc_vfs; 918 pf->num_alloc_vfs = num_alloc_vfs;
887 919
888 i40e_enable_pf_switch_lb(pf); 920 i40e_enable_pf_switch_lb(pf);
@@ -951,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
951 if (num_vfs) 983 if (num_vfs)
952 return i40e_pci_sriov_enable(pdev, num_vfs); 984 return i40e_pci_sriov_enable(pdev, num_vfs);
953 985
954 i40e_free_vfs(pf); 986 if (!i40e_vfs_are_assigned(pf)) {
987 i40e_free_vfs(pf);
988 } else {
989 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
990 return -EINVAL;
991 }
955 return 0; 992 return 0;
956} 993}
957 994
@@ -2022,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2022 } 2059 }
2023 2060
2024 /* delete the temporary mac address */ 2061 /* delete the temporary mac address */
2025 i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false); 2062 i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
2063 true, false);
2026 2064
2027 /* add the new mac address */ 2065 /* Delete all the filters for this VSI - we're going to kill it
2028 f = i40e_add_filter(vsi, mac, 0, true, false); 2066 * anyway.
2029 if (!f) { 2067 */
2030 dev_err(&pf->pdev->dev, 2068 list_for_each_entry(f, &vsi->mac_filter_list, list)
2031 "Unable to add VF ucast filter\n"); 2069 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2032 ret = -ENOMEM;
2033 goto error_param;
2034 }
2035 2070
2036 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 2071 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2037 /* program mac filter */ 2072 /* program mac filter */
@@ -2040,7 +2075,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2040 ret = -EIO; 2075 ret = -EIO;
2041 goto error_param; 2076 goto error_param;
2042 } 2077 }
2043 memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN); 2078 ether_addr_copy(vf->default_lan_addr.addr, mac);
2044 vf->pf_set_mac = true; 2079 vf->pf_set_mac = true;
2045 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2080 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2046 ret = 0; 2081 ret = 0;
@@ -2088,18 +2123,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2088 goto error_pvid; 2123 goto error_pvid;
2089 } 2124 }
2090 2125
2091 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) 2126 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
2092 dev_err(&pf->pdev->dev, 2127 dev_err(&pf->pdev->dev,
2093 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 2128 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2094 vf_id); 2129 vf_id);
 2130	 /* Administrator error - take the VF offline until the
 2131	  * network is reconfigured correctly and the VF driver
 2132	  * is reloaded.
2133 */
2134 i40e_vc_disable_vf(pf, vf);
2135 }
2095 2136
2096 /* Check for condition where there was already a port VLAN ID 2137 /* Check for condition where there was already a port VLAN ID
2097 * filter set and now it is being deleted by setting it to zero. 2138 * filter set and now it is being deleted by setting it to zero.
2139 * Additionally check for the condition where there was a port
2140 * VLAN but now there is a new and different port VLAN being set.
2098 * Before deleting all the old VLAN filters we must add new ones 2141 * Before deleting all the old VLAN filters we must add new ones
2099 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 2142 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2100 * MAC addresses deleted. 2143 * MAC addresses deleted.
2101 */ 2144 */
2102 if (!(vlan_id || qos) && vsi->info.pvid) 2145 if ((!(vlan_id || qos) ||
2146 (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
2147 vsi->info.pvid)
2103 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); 2148 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
2104 2149
2105 if (vsi->info.pvid) { 2150 if (vsi->info.pvid) {
@@ -2150,6 +2195,8 @@ error_pvid:
2150 return ret; 2195 return ret;
2151} 2196}
2152 2197
2198#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
2199#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
2153/** 2200/**
2154 * i40e_ndo_set_vf_bw 2201 * i40e_ndo_set_vf_bw
2155 * @netdev: network interface device structure 2202 * @netdev: network interface device structure
@@ -2158,9 +2205,76 @@ error_pvid:
2158 * 2205 *
2159 * configure vf tx rate 2206 * configure vf tx rate
2160 **/ 2207 **/
2161int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) 2208int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2209 int max_tx_rate)
2162{ 2210{
2163 return -EOPNOTSUPP; 2211 struct i40e_netdev_priv *np = netdev_priv(netdev);
2212 struct i40e_pf *pf = np->vsi->back;
2213 struct i40e_vsi *vsi;
2214 struct i40e_vf *vf;
2215 int speed = 0;
2216 int ret = 0;
2217
2218 /* validate the request */
2219 if (vf_id >= pf->num_alloc_vfs) {
2220 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
2221 ret = -EINVAL;
2222 goto error;
2223 }
2224
2225 if (min_tx_rate) {
2226 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
2227 min_tx_rate, vf_id);
2228 return -EINVAL;
2229 }
2230
2231 vf = &(pf->vf[vf_id]);
2232 vsi = pf->vsi[vf->lan_vsi_index];
2233 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2234 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
2235 ret = -EINVAL;
2236 goto error;
2237 }
2238
2239 switch (pf->hw.phy.link_info.link_speed) {
2240 case I40E_LINK_SPEED_40GB:
2241 speed = 40000;
2242 break;
2243 case I40E_LINK_SPEED_10GB:
2244 speed = 10000;
2245 break;
2246 case I40E_LINK_SPEED_1GB:
2247 speed = 1000;
2248 break;
2249 default:
2250 break;
2251 }
2252
2253 if (max_tx_rate > speed) {
 2254 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.\n",
2255 max_tx_rate, vf->vf_id);
2256 ret = -EINVAL;
2257 goto error;
2258 }
2259
2260 if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
2261 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2262 max_tx_rate = 50;
2263 }
2264
 2265 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
2266 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
2267 max_tx_rate / I40E_BW_CREDIT_DIVISOR,
2268 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
2269 if (ret) {
2270 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
2271 ret);
2272 ret = -EIO;
2273 goto error;
2274 }
2275 vf->tx_rate = max_tx_rate;
2276error:
2277 return ret;
2164} 2278}
2165 2279
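The credit arithmetic above is simple enough to check in isolation. A hedged sketch follows; the divisor and the 50 Mbps floor are copied from the defines above, everything else is illustrative:

#include <stdio.h>

#define BW_CREDIT_DIVISOR 50 /* 50 Mbps per credit, as in I40E_BW_CREDIT_DIVISOR */

/* Clamp a requested max Tx rate to the usable range and convert it to
 * bandwidth credits; 0 means "no limit" and passes through as 0.
 */
static int rate_to_credits(int max_tx_rate)
{
	if (max_tx_rate > 0 && max_tx_rate < BW_CREDIT_DIVISOR)
		max_tx_rate = BW_CREDIT_DIVISOR; /* floor at one credit */
	return max_tx_rate / BW_CREDIT_DIVISOR;
}

int main(void)
{
	printf("1000 Mbps -> %d credits\n", rate_to_credits(1000)); /* 20 */
	printf("  30 Mbps -> %d credits\n", rate_to_credits(30));   /* 1  */
	printf("   0 Mbps -> %d credits (disabled)\n", rate_to_credits(0));
	return 0;
}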
2166/** 2280/**
@@ -2200,10 +2314,18 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2200 2314
2201 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); 2315 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
2202 2316
2203 ivi->tx_rate = 0; 2317 ivi->max_tx_rate = vf->tx_rate;
2318 ivi->min_tx_rate = 0;
2204 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 2319 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2205 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 2320 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2206 I40E_VLAN_PRIORITY_SHIFT; 2321 I40E_VLAN_PRIORITY_SHIFT;
 2322 if (!vf->link_forced)
 2323 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 2324 else if (vf->link_up)
2325 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2326 else
2327 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2328 ivi->spoofchk = vf->spoofchk;
2207 ret = 0; 2329 ret = 0;
2208 2330
2209error_param: 2331error_param:
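For reference, the three-way link report added above reduces to this mapping; the enum values below stand in for the uapi IFLA_VF_LINK_STATE_* constants:

#include <stdbool.h>
#include <stdio.h>

enum vf_link_state { LINK_AUTO, LINK_ENABLE, LINK_DISABLE };

static enum vf_link_state report_link(bool forced, bool up)
{
	if (!forced)
		return LINK_AUTO;     /* VF follows the physical link */
	return up ? LINK_ENABLE       /* administratively forced up */
		  : LINK_DISABLE;     /* administratively forced down */
}

int main(void)
{
	printf("%d %d %d\n", report_link(false, false),
	       report_link(true, true), report_link(true, false));
	return 0;
}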
@@ -2270,3 +2392,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2270error_out: 2392error_out:
2271 return ret; 2393 return ret;
2272} 2394}
2395
2396/**
2397 * i40e_ndo_set_vf_spoofchk
2398 * @netdev: network interface device structure
2399 * @vf_id: vf identifier
2400 * @enable: flag to enable or disable feature
2401 *
2402 * Enable or disable VF spoof checking
2403 **/
2404int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
2405{
2406 struct i40e_netdev_priv *np = netdev_priv(netdev);
2407 struct i40e_vsi *vsi = np->vsi;
2408 struct i40e_pf *pf = vsi->back;
2409 struct i40e_vsi_context ctxt;
2410 struct i40e_hw *hw = &pf->hw;
2411 struct i40e_vf *vf;
2412 int ret = 0;
2413
2414 /* validate the request */
2415 if (vf_id >= pf->num_alloc_vfs) {
2416 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2417 ret = -EINVAL;
2418 goto out;
2419 }
2420
2421 vf = &(pf->vf[vf_id]);
2422
2423 if (enable == vf->spoofchk)
2424 goto out;
2425
2426 vf->spoofchk = enable;
2427 memset(&ctxt, 0, sizeof(ctxt));
2428 ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
2429 ctxt.pf_num = pf->hw.pf_id;
2430 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2431 if (enable)
2432 ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
2433 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2434 if (ret) {
2435 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2436 ret);
2437 ret = -EIO;
2438 }
2439out:
2440 return ret;
2441}
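The spoof-check toggle above follows the usual "update one VSI section" pattern: only the security section is flagged valid and the MAC-check bit tracks the enable flag. A minimal userspace sketch, with illustrative constants standing in for the AQ defines:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEC_FLAG_MAC_CHK   0x01   /* stand-in for I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK */
#define SECTION_SECURITY   0x0001 /* stand-in for I40E_AQ_VSI_PROP_SECURITY_VALID */

struct vsi_ctx {
	uint16_t seid;
	uint16_t valid_sections;
	uint8_t  sec_flags;
};

/* Build the minimal "update VSI" context: only the security section is
 * marked valid, and the MAC-check bit is set iff spoof checking is on.
 */
static struct vsi_ctx build_spoofchk_ctx(uint16_t seid, bool enable)
{
	struct vsi_ctx ctx = { .seid = seid,
			       .valid_sections = SECTION_SECURITY };
	if (enable)
		ctx.sec_flags |= SEC_FLAG_MAC_CHK;
	return ctx;
}

int main(void)
{
	struct vsi_ctx on = build_spoofchk_ctx(5, true);
	printf("seid=%u flags=0x%02x\n", (unsigned)on.seid,
	       (unsigned)on.sec_flags);
	return 0;
}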
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f396d5..63e7e0d81ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,8 +98,10 @@ struct i40e_vf {
98 98
99 unsigned long vf_caps; /* vf's adv. capabilities */ 99 unsigned long vf_caps; /* vf's adv. capabilities */
100 unsigned long vf_states; /* vf's runtime states */ 100 unsigned long vf_states; /* vf's runtime states */
101 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
101 bool link_forced; 102 bool link_forced;
102 bool link_up; /* only valid if vf link is forced */ 103 bool link_up; /* only valid if vf link is forced */
104 bool spoofchk;
103}; 105};
104 106
105void i40e_free_vfs(struct i40e_pf *pf); 107void i40e_free_vfs(struct i40e_pf *pf);
@@ -115,10 +117,12 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
115int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); 117int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
116int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, 118int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
117 int vf_id, u16 vlan_id, u8 qos); 119 int vf_id, u16 vlan_id, u8 qos);
118int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); 120int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
121 int max_tx_rate);
119int i40e_ndo_get_vf_config(struct net_device *netdev, 122int i40e_ndo_get_vf_config(struct net_device *netdev,
120 int vf_id, struct ifla_vf_info *ivi); 123 int vf_id, struct ifla_vf_info *ivi);
121int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); 124int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
 125int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
122 126
123void i40e_vc_notify_link_state(struct i40e_pf *pf); 127void i40e_vc_notify_link_state(struct i40e_pf *pf);
124void i40e_vc_notify_reset(struct i40e_pf *pf); 128void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index e09be37a07a8..3a423836a565 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4# Copyright(c) 2013 Intel Corporation. 4# Copyright(c) 2013 - 2014 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details. 13# more details.
14# 14#
15# You should have received a copy of the GNU General Public License along
16# with this program. If not, see <http://www.gnu.org/licenses/>.
17#
15# The full GNU General Public License is included in this distribution in 18# The full GNU General Public License is included in this distribution in
16# the file called "COPYING". 19# the file called "COPYING".
17# 20#
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce95936e..eb67cce3e8f9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -28,6 +31,16 @@
28#include "i40e_prototype.h" 31#include "i40e_prototype.h"
29 32
30/** 33/**
34 * i40e_is_nvm_update_op - return true if this is an NVM update operation
35 * @desc: API request descriptor
36 **/
37static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
38{
39 return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
40 (desc->opcode == i40e_aqc_opc_nvm_update);
41}
42
43/**
31 * i40e_adminq_init_regs - Initialize AdminQ registers 44 * i40e_adminq_init_regs - Initialize AdminQ registers
32 * @hw: pointer to the hardware structure 45 * @hw: pointer to the hardware structure
33 * 46 *
@@ -276,8 +289,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
276 * 289 *
277 * Configure base address and length registers for the transmit queue 290 * Configure base address and length registers for the transmit queue
278 **/ 291 **/
279static void i40e_config_asq_regs(struct i40e_hw *hw) 292static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
280{ 293{
294 i40e_status ret_code = 0;
295 u32 reg = 0;
296
281 if (hw->mac.type == I40E_MAC_VF) { 297 if (hw->mac.type == I40E_MAC_VF) {
282 /* configure the transmit queue */ 298 /* configure the transmit queue */
283 wr32(hw, I40E_VF_ATQBAH1, 299 wr32(hw, I40E_VF_ATQBAH1,
@@ -286,6 +302,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
286 lower_32_bits(hw->aq.asq.desc_buf.pa)); 302 lower_32_bits(hw->aq.asq.desc_buf.pa));
287 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries | 303 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
288 I40E_VF_ATQLEN1_ATQENABLE_MASK)); 304 I40E_VF_ATQLEN1_ATQENABLE_MASK));
305 reg = rd32(hw, I40E_VF_ATQBAL1);
289 } else { 306 } else {
290 /* configure the transmit queue */ 307 /* configure the transmit queue */
291 wr32(hw, I40E_PF_ATQBAH, 308 wr32(hw, I40E_PF_ATQBAH,
@@ -294,7 +311,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
294 lower_32_bits(hw->aq.asq.desc_buf.pa)); 311 lower_32_bits(hw->aq.asq.desc_buf.pa));
295 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries | 312 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
296 I40E_PF_ATQLEN_ATQENABLE_MASK)); 313 I40E_PF_ATQLEN_ATQENABLE_MASK));
314 reg = rd32(hw, I40E_PF_ATQBAL);
297 } 315 }
316
317 /* Check one register to verify that config was applied */
318 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
319 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
320
321 return ret_code;
298} 322}
299 323
300/** 324/**
@@ -303,8 +327,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
303 * 327 *
304 * Configure base address and length registers for the receive (event queue) 328 * Configure base address and length registers for the receive (event queue)
305 **/ 329 **/
306static void i40e_config_arq_regs(struct i40e_hw *hw) 330static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
307{ 331{
332 i40e_status ret_code = 0;
333 u32 reg = 0;
334
308 if (hw->mac.type == I40E_MAC_VF) { 335 if (hw->mac.type == I40E_MAC_VF) {
309 /* configure the receive queue */ 336 /* configure the receive queue */
310 wr32(hw, I40E_VF_ARQBAH1, 337 wr32(hw, I40E_VF_ARQBAH1,
@@ -313,6 +340,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
313 lower_32_bits(hw->aq.arq.desc_buf.pa)); 340 lower_32_bits(hw->aq.arq.desc_buf.pa));
314 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries | 341 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
315 I40E_VF_ARQLEN1_ARQENABLE_MASK)); 342 I40E_VF_ARQLEN1_ARQENABLE_MASK));
343 reg = rd32(hw, I40E_VF_ARQBAL1);
316 } else { 344 } else {
317 /* configure the receive queue */ 345 /* configure the receive queue */
318 wr32(hw, I40E_PF_ARQBAH, 346 wr32(hw, I40E_PF_ARQBAH,
@@ -321,10 +349,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
321 lower_32_bits(hw->aq.arq.desc_buf.pa)); 349 lower_32_bits(hw->aq.arq.desc_buf.pa));
322 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries | 350 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
323 I40E_PF_ARQLEN_ARQENABLE_MASK)); 351 I40E_PF_ARQLEN_ARQENABLE_MASK));
352 reg = rd32(hw, I40E_PF_ARQBAL);
324 } 353 }
325 354
326 /* Update tail in the HW to post pre-allocated buffers */ 355 /* Update tail in the HW to post pre-allocated buffers */
327 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 356 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
357
358 /* Check one register to verify that config was applied */
359 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
360 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
361
362 return ret_code;
328} 363}
329 364
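Both queue-config helpers above now use the same write-then-readback idiom: write the ring's DMA base, read one register back, and fail if the value did not stick (a dead or mid-reset device reads back zero). A toy sketch with an in-memory register file standing in for MMIO:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4]; /* toy register file; wr32()/rd32() touch real BAR space */
static void wr32(unsigned r, uint32_t v) { regs[r] = v; }
static uint32_t rd32(unsigned r) { return regs[r]; }

static int config_queue_regs(uint64_t ring_pa)
{
	wr32(0, (uint32_t)(ring_pa >> 32)); /* base address high */
	wr32(1, (uint32_t)ring_pa);         /* base address low  */
	/* check one register to verify that the config was applied */
	return rd32(1) == (uint32_t)ring_pa ? 0 : -1;
}

int main(void)
{
	printf("config: %d\n", config_queue_regs(0x1234567890ULL));
	return 0;
}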
330/** 365/**
@@ -372,7 +407,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
372 goto init_adminq_free_rings; 407 goto init_adminq_free_rings;
373 408
374 /* initialize base registers */ 409 /* initialize base registers */
375 i40e_config_asq_regs(hw); 410 ret_code = i40e_config_asq_regs(hw);
411 if (ret_code)
412 goto init_adminq_free_rings;
376 413
377 /* success! */ 414 /* success! */
378 goto init_adminq_exit; 415 goto init_adminq_exit;
@@ -429,7 +466,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
429 goto init_adminq_free_rings; 466 goto init_adminq_free_rings;
430 467
431 /* initialize base registers */ 468 /* initialize base registers */
432 i40e_config_arq_regs(hw); 469 ret_code = i40e_config_arq_regs(hw);
470 if (ret_code)
471 goto init_adminq_free_rings;
433 472
434 /* success! */ 473 /* success! */
435 goto init_adminq_exit; 474 goto init_adminq_exit;
@@ -659,6 +698,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
659 goto asq_send_command_exit; 698 goto asq_send_command_exit;
660 } 699 }
661 700
701 if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
702 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
703 status = I40E_ERR_NVM;
704 goto asq_send_command_exit;
705 }
706
662 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 707 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
663 if (cmd_details) { 708 if (cmd_details) {
664 *details = *cmd_details; 709 *details = *cmd_details;
@@ -786,6 +831,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
786 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; 831 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
787 } 832 }
788 833
834 if (i40e_is_nvm_update_op(desc))
835 hw->aq.nvm_busy = true;
836
789 /* update the error if time out occurred */ 837 /* update the error if time out occurred */
790 if ((!cmd_completed) && 838 if ((!cmd_completed) &&
791 (!details->async && !details->postpone)) { 839 (!details->async && !details->postpone)) {
@@ -880,6 +928,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
880 e->msg_size); 928 e->msg_size);
881 } 929 }
882 930
931 if (i40e_is_nvm_update_op(&e->desc))
932 hw->aq.nvm_busy = false;
933
883 /* Restore the original datalen and buffer address in the desc, 934 /* Restore the original datalen and buffer address in the desc,
884 * FW updates datalen to indicate the event message 935 * FW updates datalen to indicate the event message
885 * size 936 * size
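Taken together, the hunks above implement a one-deep serialization of NVM commands: a send fails while an NVM operation is in flight, the busy flag is raised when an NVM command is posted, and it drops when the matching completion is reaped from the receive queue. A condensed sketch of that lifecycle (names and the -1 error stand in for the driver's):

#include <stdbool.h>
#include <stdio.h>

enum opcode { OPC_NVM_ERASE, OPC_NVM_UPDATE, OPC_OTHER };

static bool is_nvm_update_op(enum opcode op)
{
	return op == OPC_NVM_ERASE || op == OPC_NVM_UPDATE;
}

static bool nvm_busy; /* mirrors hw->aq.nvm_busy */

static int send_command(enum opcode op)
{
	if (is_nvm_update_op(op) && nvm_busy)
		return -1;           /* I40E_ERR_NVM in the driver */
	if (is_nvm_update_op(op))
		nvm_busy = true;
	return 0;
}

static void clean_completion(enum opcode op)
{
	if (is_nvm_update_op(op))
		nvm_busy = false;
}

int main(void)
{
	printf("%d ", send_command(OPC_NVM_UPDATE)); /* 0  */
	printf("%d ", send_command(OPC_NVM_ERASE));  /* -1 */
	clean_completion(OPC_NVM_UPDATE);
	printf("%d\n", send_command(OPC_NVM_ERASE)); /* 0  */
	return 0;
}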
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d95cc..e3472c62e155 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -87,6 +90,7 @@ struct i40e_adminq_info {
87 u16 fw_min_ver; /* firmware minor version */ 90 u16 fw_min_ver; /* firmware minor version */
88 u16 api_maj_ver; /* api major version */ 91 u16 api_maj_ver; /* api major version */
89 u16 api_min_ver; /* api minor version */ 92 u16 api_min_ver; /* api minor version */
93 bool nvm_busy;
90 94
91 struct mutex asq_mutex; /* Send queue lock */ 95 struct mutex asq_mutex; /* Send queue lock */
92 struct mutex arq_mutex; /* Receive queue lock */ 96 struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6bd98a..e656ea7a7920 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -31,7 +34,7 @@
31 */ 34 */
32 35
33#define I40E_FW_API_VERSION_MAJOR 0x0001 36#define I40E_FW_API_VERSION_MAJOR 0x0001
34#define I40E_FW_API_VERSION_MINOR 0x0001 37#define I40E_FW_API_VERSION_MINOR 0x0002
35#define I40E_FW_API_VERSION_A0_MINOR 0x0000 38#define I40E_FW_API_VERSION_A0_MINOR 0x0000
36 39
37struct i40e_aq_desc { 40struct i40e_aq_desc {
@@ -121,6 +124,7 @@ enum i40e_admin_queue_opc {
121 i40e_aqc_opc_get_version = 0x0001, 124 i40e_aqc_opc_get_version = 0x0001,
122 i40e_aqc_opc_driver_version = 0x0002, 125 i40e_aqc_opc_driver_version = 0x0002,
123 i40e_aqc_opc_queue_shutdown = 0x0003, 126 i40e_aqc_opc_queue_shutdown = 0x0003,
127 i40e_aqc_opc_set_pf_context = 0x0004,
124 128
125 /* resource ownership */ 129 /* resource ownership */
126 i40e_aqc_opc_request_resource = 0x0008, 130 i40e_aqc_opc_request_resource = 0x0008,
@@ -180,9 +184,6 @@ enum i40e_admin_queue_opc {
180 i40e_aqc_opc_add_mirror_rule = 0x0260, 184 i40e_aqc_opc_add_mirror_rule = 0x0260,
181 i40e_aqc_opc_delete_mirror_rule = 0x0261, 185 i40e_aqc_opc_delete_mirror_rule = 0x0261,
182 186
183 i40e_aqc_opc_set_storm_control_config = 0x0280,
184 i40e_aqc_opc_get_storm_control_config = 0x0281,
185
186 /* DCB commands */ 187 /* DCB commands */
187 i40e_aqc_opc_dcb_ignore_pfc = 0x0301, 188 i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
188 i40e_aqc_opc_dcb_updated = 0x0302, 189 i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +206,7 @@ enum i40e_admin_queue_opc {
205 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, 206 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
206 i40e_aqc_opc_suspend_port_tx = 0x041B, 207 i40e_aqc_opc_suspend_port_tx = 0x041B,
207 i40e_aqc_opc_resume_port_tx = 0x041C, 208 i40e_aqc_opc_resume_port_tx = 0x041C,
209 i40e_aqc_opc_configure_partition_bw = 0x041D,
208 210
209 /* hmc */ 211 /* hmc */
210 i40e_aqc_opc_query_hmc_resource_profile = 0x0500, 212 i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -222,13 +224,15 @@ enum i40e_admin_queue_opc {
222 i40e_aqc_opc_get_partner_advt = 0x0616, 224 i40e_aqc_opc_get_partner_advt = 0x0616,
223 i40e_aqc_opc_set_lb_modes = 0x0618, 225 i40e_aqc_opc_set_lb_modes = 0x0618,
224 i40e_aqc_opc_get_phy_wol_caps = 0x0621, 226 i40e_aqc_opc_get_phy_wol_caps = 0x0621,
225 i40e_aqc_opc_set_phy_reset = 0x0622, 227 i40e_aqc_opc_set_phy_debug = 0x0622,
226 i40e_aqc_opc_upload_ext_phy_fm = 0x0625, 228 i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
227 229
228 /* NVM commands */ 230 /* NVM commands */
229 i40e_aqc_opc_nvm_read = 0x0701, 231 i40e_aqc_opc_nvm_read = 0x0701,
230 i40e_aqc_opc_nvm_erase = 0x0702, 232 i40e_aqc_opc_nvm_erase = 0x0702,
231 i40e_aqc_opc_nvm_update = 0x0703, 233 i40e_aqc_opc_nvm_update = 0x0703,
234 i40e_aqc_opc_nvm_config_read = 0x0704,
235 i40e_aqc_opc_nvm_config_write = 0x0705,
232 236
233 /* virtualization commands */ 237 /* virtualization commands */
234 i40e_aqc_opc_send_msg_to_pf = 0x0801, 238 i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,8 +274,6 @@ enum i40e_admin_queue_opc {
270 i40e_aqc_opc_debug_set_mode = 0xFF01, 274 i40e_aqc_opc_debug_set_mode = 0xFF01,
271 i40e_aqc_opc_debug_read_reg = 0xFF03, 275 i40e_aqc_opc_debug_read_reg = 0xFF03,
272 i40e_aqc_opc_debug_write_reg = 0xFF04, 276 i40e_aqc_opc_debug_write_reg = 0xFF04,
273 i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
274 i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
275 i40e_aqc_opc_debug_modify_reg = 0xFF07, 277 i40e_aqc_opc_debug_modify_reg = 0xFF07,
276 i40e_aqc_opc_debug_dump_internals = 0xFF08, 278 i40e_aqc_opc_debug_dump_internals = 0xFF08,
277 i40e_aqc_opc_debug_modify_internals = 0xFF09, 279 i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -339,6 +341,14 @@ struct i40e_aqc_queue_shutdown {
339 341
340I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); 342I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
341 343
344/* Set PF context (0x0004, direct) */
345struct i40e_aqc_set_pf_context {
346 u8 pf_id;
347 u8 reserved[15];
348};
349
350I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
351
342/* Request resource ownership (direct 0x0008) 352/* Request resource ownership (direct 0x0008)
343 * Release resource ownership (direct 0x0009) 353 * Release resource ownership (direct 0x0009)
344 */ 354 */
@@ -678,7 +688,6 @@ struct i40e_aqc_add_get_update_vsi {
678#define I40E_AQ_VSI_TYPE_PF 0x2 688#define I40E_AQ_VSI_TYPE_PF 0x2
679#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 689#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
680#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 690#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
681#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
682 __le32 addr_high; 691 __le32 addr_high;
683 __le32 addr_low; 692 __le32 addr_low;
684}; 693};
@@ -1040,7 +1049,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
1040#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 1049#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
1041 __le16 seid; 1050 __le16 seid;
1042#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF 1051#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
1043 u8 reserved[10]; 1052 __le16 vlan_tag;
1053#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
1054 u8 reserved[8];
1044}; 1055};
1045 1056
1046I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); 1057I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
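The new vlan_tag field carries a valid bit in its top bit, so a caller packs the VLAN ID and the flag into one halfword. A hedged sketch (le16 conversion omitted; the valid-bit value is the define above):

#include <stdint.h>
#include <stdio.h>

#define VSI_VLAN_VALID 0x8000 /* as in I40E_AQC_SET_VSI_VLAN_VALID */

/* Pack a VLAN ID into the new vlan_tag field; the top bit tells the
 * firmware the tag is meaningful.
 */
static uint16_t pack_promisc_vlan(uint16_t vid)
{
	return (uint16_t)(VSI_VLAN_VALID | (vid & 0x0FFF));
}

int main(void)
{
	printf("0x%04x\n", (unsigned)pack_promisc_vlan(42)); /* 0x802a */
	return 0;
}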
@@ -1289,27 +1300,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
1289 1300
1290I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); 1301I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
1291 1302
1292/* Set Storm Control Configuration (direct 0x0280)
1293 * Get Storm Control Configuration (direct 0x0281)
1294 * the command and response use the same descriptor structure
1295 */
1296struct i40e_aqc_set_get_storm_control_config {
1297 __le32 broadcast_threshold;
1298 __le32 multicast_threshold;
1299 __le32 control_flags;
1300#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
1301#define I40E_AQC_STORM_CONTROL_MDICW 0x02
1302#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
1303#define I40E_AQC_STORM_CONTROL_BDICW 0x08
1304#define I40E_AQC_STORM_CONTROL_BIDU 0x10
1305#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
1306#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
1307 I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
1308 u8 reserved[4];
1309};
1310
1311I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
1312
1313/* DCB 0x03xx*/ 1303/* DCB 0x03xx*/
1314 1304
1315/* PFC Ignore (direct 0x0301) 1305/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1417,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
1427struct i40e_aqc_configure_switching_comp_ets_data { 1417struct i40e_aqc_configure_switching_comp_ets_data {
1428 u8 reserved[4]; 1418 u8 reserved[4];
1429 u8 tc_valid_bits; 1419 u8 tc_valid_bits;
1430 u8 reserved1; 1420 u8 seepage;
1421#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
1431 u8 tc_strict_priority_flags; 1422 u8 tc_strict_priority_flags;
1432 u8 reserved2[17]; 1423 u8 reserved1[17];
1433 u8 tc_bw_share_credits[8]; 1424 u8 tc_bw_share_credits[8];
1434 u8 reserved3[96]; 1425 u8 reserved2[96];
1435}; 1426};
1436 1427
1437/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1428/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1490,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1499 * (direct 0x041B and 0x041C) uses the generic SEID struct 1490 * (direct 0x041B and 0x041C) uses the generic SEID struct
1500 */ 1491 */
1501 1492
1493/* Configure partition BW
1494 * (indirect 0x041D)
1495 */
1496struct i40e_aqc_configure_partition_bw_data {
1497 __le16 pf_valid_bits;
1498 u8 min_bw[16]; /* guaranteed bandwidth */
1499 u8 max_bw[16]; /* bandwidth limit */
1500};
1501
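A caller of the new partition-BW command marks each PF it configures in pf_valid_bits and fills the matching min/max slots. A sketch of that fill, with a plain C struct mirroring the layout and illustrative values (not taken from the datasheet):

#include <stdint.h>
#include <stdio.h>

struct partition_bw_data {   /* mirrors i40e_aqc_configure_partition_bw_data */
	uint16_t pf_valid_bits; /* __le16 on the wire */
	uint8_t  min_bw[16];    /* guaranteed bandwidth per PF */
	uint8_t  max_bw[16];    /* bandwidth limit per PF */
};

static void set_pf_bw(struct partition_bw_data *d, unsigned pf,
		      uint8_t min, uint8_t max)
{
	d->pf_valid_bits |= (uint16_t)(1u << pf);
	d->min_bw[pf] = min;
	d->max_bw[pf] = max;
}

int main(void)
{
	struct partition_bw_data d = {0};
	set_pf_bw(&d, 2, 25, 100);
	printf("valid=0x%04x min=%u max=%u\n", (unsigned)d.pf_valid_bits,
	       (unsigned)d.min_bw[2], (unsigned)d.max_bw[2]);
	return 0;
}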
1502/* Get and set the active HMC resource profile and status. 1502/* Get and set the active HMC resource profile and status.
1503 * (direct 0x0500) and (direct 0x0501) 1503 * (direct 0x0500) and (direct 0x0501)
1504 */ 1504 */
@@ -1539,6 +1539,8 @@ enum i40e_aq_phy_type {
1539 I40E_PHY_TYPE_XLPPI = 0x9, 1539 I40E_PHY_TYPE_XLPPI = 0x9,
1540 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, 1540 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
1541 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, 1541 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
1542 I40E_PHY_TYPE_10GBASE_AOC = 0xC,
1543 I40E_PHY_TYPE_40GBASE_AOC = 0xD,
1542 I40E_PHY_TYPE_100BASE_TX = 0x11, 1544 I40E_PHY_TYPE_100BASE_TX = 0x11,
1543 I40E_PHY_TYPE_1000BASE_T = 0x12, 1545 I40E_PHY_TYPE_1000BASE_T = 0x12,
1544 I40E_PHY_TYPE_10GBASE_T = 0x13, 1546 I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1551,10 @@ enum i40e_aq_phy_type {
1549 I40E_PHY_TYPE_40GBASE_CR4 = 0x18, 1551 I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
1550 I40E_PHY_TYPE_40GBASE_SR4 = 0x19, 1552 I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
1551 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, 1553 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
1552 I40E_PHY_TYPE_20GBASE_KR2 = 0x1B, 1554 I40E_PHY_TYPE_1000BASE_SX = 0x1B,
1555 I40E_PHY_TYPE_1000BASE_LX = 0x1C,
1556 I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
1557 I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
1553 I40E_PHY_TYPE_MAX 1558 I40E_PHY_TYPE_MAX
1554}; 1559};
1555 1560
@@ -1583,11 +1588,8 @@ struct i40e_aq_get_phy_abilities_resp {
1583#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 1588#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
1584#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 1589#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
1585#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 1590#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
1586#define I40E_AQ_PHY_FLAG_AN_SHIFT 3 1591#define I40E_AQ_PHY_LINK_ENABLED 0x08
1587#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT) 1592#define I40E_AQ_PHY_AN_ENABLED 0x10
1588#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
1589#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
1590#define I40E_AQ_PHY_FLAG_AN_ON 0x02
1591#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 1593#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
1592 __le16 eee_capability; 1594 __le16 eee_capability;
1593#define I40E_AQ_EEE_100BASE_TX 0x0002 1595#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1698,7 @@ struct i40e_aqc_get_link_status {
1696#define I40E_AQ_LINK_TX_ACTIVE 0x00 1698#define I40E_AQ_LINK_TX_ACTIVE 0x00
1697#define I40E_AQ_LINK_TX_DRAINED 0x01 1699#define I40E_AQ_LINK_TX_DRAINED 0x01
1698#define I40E_AQ_LINK_TX_FLUSHED 0x03 1700#define I40E_AQ_LINK_TX_FLUSHED 0x03
1701#define I40E_AQ_LINK_FORCED_40G 0x10
1699 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ 1702 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
1700 __le16 max_frame_size; 1703 __le16 max_frame_size;
1701 u8 config; 1704 u8 config;
@@ -1747,14 +1750,21 @@ struct i40e_aqc_set_lb_mode {
1747 1750
1748I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); 1751I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
1749 1752
1750/* Set PHY Reset command (0x0622) */ 1753/* Set PHY Debug command (0x0622) */
1751struct i40e_aqc_set_phy_reset { 1754struct i40e_aqc_set_phy_debug {
1752 u8 reset_flags; 1755 u8 command_flags;
1753#define I40E_AQ_PHY_RESET_REQUEST 0x02 1756#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
1757#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
1758#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
1759 I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
1760#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
1761#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
1762#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
1763#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
1754 u8 reserved[15]; 1764 u8 reserved[15];
1755}; 1765};
1756 1766
1757I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset); 1767I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
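The old single reset flag is now a small bitfield: internal reset, a two-bit external-reset selector, and a link-disable bit. A sketch composing command_flags for a hard external reset (field layout copied from the defines above; the combination chosen is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PHY_DBG_RESET_INTERNAL  0x02
#define PHY_DBG_RESET_EXT_SHIFT 2
#define PHY_DBG_RESET_EXT_HARD  0x01
#define PHY_DBG_RESET_EXT_SOFT  0x02
#define PHY_DBG_DISABLE_LINK_FW 0x10

int main(void)
{
	/* request a hard reset of the external PHY, nothing else */
	uint8_t flags = (uint8_t)(PHY_DBG_RESET_EXT_HARD <<
				  PHY_DBG_RESET_EXT_SHIFT);
	printf("command_flags=0x%02x\n", (unsigned)flags); /* 0x04 */
	return 0;
}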
1758 1768
1759enum i40e_aq_phy_reg_type { 1769enum i40e_aq_phy_reg_type {
1760 I40E_AQC_PHY_REG_INTERNAL = 0x1, 1770 I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1789,47 @@ struct i40e_aqc_nvm_update {
1779 1789
1780I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); 1790I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1781 1791
1792/* NVM Config Read (indirect 0x0704) */
1793struct i40e_aqc_nvm_config_read {
1794 __le16 cmd_flags;
1795#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1796#define ANVM_READ_SINGLE_FEATURE 0
1797#define ANVM_READ_MULTIPLE_FEATURES 1
1798 __le16 element_count;
1799 __le16 element_id; /* Feature/field ID */
1800 u8 reserved[2];
1801 __le32 address_high;
1802 __le32 address_low;
1803};
1804
1805I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
1806
1807/* NVM Config Write (indirect 0x0705) */
1808struct i40e_aqc_nvm_config_write {
1809 __le16 cmd_flags;
1810 __le16 element_count;
1811 u8 reserved[4];
1812 __le32 address_high;
1813 __le32 address_low;
1814};
1815
1816I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1817
1818struct i40e_aqc_nvm_config_data_feature {
1819 __le16 feature_id;
1820 __le16 instance_id;
1821 __le16 feature_options;
1822 __le16 feature_selection;
1823};
1824
1825struct i40e_aqc_nvm_config_data_immediate_field {
1826#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
1827 __le16 field_id;
1828 __le16 instance_id;
1829 __le16 field_options;
1830 __le16 field_value;
1831};
1832
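An NVM Config Write buffer is a sequence of these records; an immediate-field write carries the field ID, instance, options, and the value itself. A sketch assembling one record (the ID and value below are placeholders, not real NVM field numbers):

#include <stdint.h>
#include <stdio.h>

struct nvm_immediate_field { /* mirrors i40e_aqc_nvm_config_data_immediate_field */
	uint16_t field_id;
	uint16_t instance_id;
	uint16_t field_options;
	uint16_t field_value;
};

int main(void)
{
	struct nvm_immediate_field f = {
		.field_id = 0x0010, .instance_id = 0,
		.field_options = 0, .field_value = 1,
	};
	printf("field 0x%04x <- %u\n", (unsigned)f.field_id,
	       (unsigned)f.field_value);
	return 0;
}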
1782/* Send to PF command (indirect 0x0801) id is only used by PF 1833/* Send to PF command (indirect 0x0801) id is only used by PF
1783 * Send to VF command (indirect 0x0802) id is only used by PF 1834 * Send to VF command (indirect 0x0802) id is only used by PF
1784 * Send to Peer PF command (indirect 0x0803) 1835 * Send to Peer PF command (indirect 0x0803)
@@ -1948,19 +1999,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
1948/* Add Udp Tunnel command and completion (direct 0x0B00) */ 1999/* Add Udp Tunnel command and completion (direct 0x0B00) */
1949struct i40e_aqc_add_udp_tunnel { 2000struct i40e_aqc_add_udp_tunnel {
1950 __le16 udp_port; 2001 __le16 udp_port;
1951 u8 header_len; /* in DWords, 1 to 15 */ 2002 u8 reserved0[3];
1952 u8 protocol_type; 2003 u8 protocol_type;
1953#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0 2004#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
1954#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2 2005#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
1955#define I40E_AQC_TUNNEL_TYPE_NGE 0x3 2006#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
1956 u8 variable_udp_length; 2007 u8 reserved1[10];
1957#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
1958#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
1959 u8 udp_key_index;
1960#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
1961#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
1962#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
1963 u8 reserved[10];
1964}; 2008};
1965 2009
1966I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); 2010I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
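After the rework above, the firmware derives the header length and key index itself, so the command shrinks to a port plus a tunnel type. A sketch of the slimmed-down fill (plain C struct mirroring the layout, le16 conversion omitted):

#include <stdint.h>
#include <stdio.h>

#define TUNNEL_TYPE_VXLAN 0x00 /* as in the reworked I40E_AQC_TUNNEL_TYPE_VXLAN */

struct add_udp_tunnel {
	uint16_t udp_port;      /* __le16 on the wire */
	uint8_t  reserved0[3];
	uint8_t  protocol_type;
	uint8_t  reserved1[10];
};

int main(void)
{
	struct add_udp_tunnel cmd = { .udp_port = 4789, /* VXLAN default */
				      .protocol_type = TUNNEL_TYPE_VXLAN };
	printf("port=%u type=0x%02x\n", (unsigned)cmd.udp_port,
	       (unsigned)cmd.protocol_type);
	return 0;
}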
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index d8654fb9e525..8e6a6dd9212b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index ae084378faab..a43155afdbe2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -40,12 +43,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
40 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { 43 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
41 switch (hw->device_id) { 44 switch (hw->device_id) {
42 case I40E_DEV_ID_SFP_XL710: 45 case I40E_DEV_ID_SFP_XL710:
43 case I40E_DEV_ID_SFP_X710:
44 case I40E_DEV_ID_QEMU: 46 case I40E_DEV_ID_QEMU:
45 case I40E_DEV_ID_KX_A: 47 case I40E_DEV_ID_KX_A:
46 case I40E_DEV_ID_KX_B: 48 case I40E_DEV_ID_KX_B:
47 case I40E_DEV_ID_KX_C: 49 case I40E_DEV_ID_KX_C:
48 case I40E_DEV_ID_KX_D:
49 case I40E_DEV_ID_QSFP_A: 50 case I40E_DEV_ID_QSFP_A:
50 case I40E_DEV_ID_QSFP_B: 51 case I40E_DEV_ID_QSFP_B:
51 case I40E_DEV_ID_QSFP_C: 52 case I40E_DEV_ID_QSFP_C:
@@ -130,7 +131,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
130 **/ 131 **/
131bool i40evf_check_asq_alive(struct i40e_hw *hw) 132bool i40evf_check_asq_alive(struct i40e_hw *hw)
132{ 133{
133 return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); 134 if (hw->aq.asq.len)
135 return !!(rd32(hw, hw->aq.asq.len) &
136 I40E_PF_ATQLEN_ATQENABLE_MASK);
137 else
138 return false;
134} 139}
135 140
136/** 141/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index cb97b3eed440..a2ad9a4e399d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -160,11 +163,6 @@ struct i40e_hmc_info {
160 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ 163 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
161 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) 164 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
162 165
163#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
164 wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
165 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
166 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
167
168/** 166/**
169 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit 167 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
170 * @hmc_info: pointer to the HMC configuration information structure 168 * @hmc_info: pointer to the HMC configuration information structure
@@ -223,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
223 u32 pd_index); 221 u32 pd_index);
224i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, 222i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
225 struct i40e_hmc_info *hmc_info, 223 struct i40e_hmc_info *hmc_info,
226 u32 idx, bool is_pf); 224 u32 idx);
227i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, 225i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
228 u32 idx); 226 u32 idx);
229i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, 227i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca26d0b..d6f762241537 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -53,6 +56,7 @@ struct i40e_hmc_obj_rxq {
53 u8 tphdata_ena; 56 u8 tphdata_ena;
54 u8 tphhead_ena; 57 u8 tphhead_ena;
55 u8 lrxqthresh; 58 u8 lrxqthresh;
59 u8 prefena; /* NOTE: normally must be set to 1 at init */
56}; 60};
57 61
58/* Tx queue context data */ 62/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 622f373b745d..21a91b14bf81 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 97ab8c2b76f8..849edcc2e398 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 30af953cf106..369839655818 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -1337,8 +1340,6 @@
1337#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT) 1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
1338#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
1339#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT) 1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
1340#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
1341#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
1342#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 1343#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
1343#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) 1344#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
1344#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 1345#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1364,8 +1365,6 @@
1364#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT) 1365#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
1365#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 1366#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
1366#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) 1367#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
1367#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
1368#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
1369#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 1368#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
1370#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) 1369#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
1371#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 1370#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1586,6 +1585,14 @@
1586#define I40E_GLLAN_TSOMSK_M 0x000442DC 1585#define I40E_GLLAN_TSOMSK_M 0x000442DC
1587#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 1586#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
1588#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) 1587#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
1588#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
1589#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
1590#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
1591#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
1592#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
1593#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
1594#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
1595
1589#define I40E_PFLAN_QALLOC 0x001C0400 1596#define I40E_PFLAN_QALLOC 0x001C0400
1590#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 1597#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
1591#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) 1598#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7c08cc2e339b..7fa7a41915c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b9f50f40abe1..48ebb6cd69f2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -725,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
725 u32 rx_error, 728 u32 rx_error,
726 u16 rx_ptype) 729 u16 rx_ptype)
727{ 730{
731 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
732 bool ipv4 = false, ipv6 = false;
728 bool ipv4_tunnel, ipv6_tunnel; 733 bool ipv4_tunnel, ipv6_tunnel;
729 __wsum rx_udp_csum; 734 __wsum rx_udp_csum;
730 __sum16 csum;
731 struct iphdr *iph; 735 struct iphdr *iph;
736 __sum16 csum;
732 737
733 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && 738 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
734 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); 739 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -739,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
739 skb->ip_summed = CHECKSUM_NONE; 744 skb->ip_summed = CHECKSUM_NONE;
740 745
741 /* Rx csum enabled and ip headers found? */ 746 /* Rx csum enabled and ip headers found? */
742 if (!(vsi->netdev->features & NETIF_F_RXCSUM && 747 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
743 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
744 return; 748 return;
745 749
750 /* did the hardware decode the packet and checksum? */
751 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
752 return;
753
754 /* both known and outer_ip must be set for the below code to work */
755 if (!(decoded.known && decoded.outer_ip))
756 return;
757
758 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
759 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
760 ipv4 = true;
761 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
762 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
763 ipv6 = true;
764
765 if (ipv4 &&
766 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
767 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
768 goto checksum_fail;
769
746 /* likely incorrect csum if alternate IP extension headers found */ 770 /* likely incorrect csum if alternate IP extension headers found */
747 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 771 if (ipv6 &&
772 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
773 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
774 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
775 /* don't increment checksum err here, non-fatal err */
748 return; 776 return;
749 777
750 /* IP or L4 or outmost IP checksum error */ 778 /* there was some L4 error, count error and punt packet to the stack */
751 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | 779 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
752 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | 780 goto checksum_fail;
753 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { 781
754 vsi->back->hw_csum_rx_error++; 782 /* handle packets that were not able to be checksummed due
783 * to arrival speed, in this case the stack can compute
784 * the csum.
785 */
786 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
755 return; 787 return;
756 }
757 788
789 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
790 * it in the driver, hardware does not do it for us.
791 * Since L3L4P bit was set we assume a valid IHL value (>=5)
792 * so the total length of IPv4 header is IHL*4 bytes
 793 * The UDP_0 bit *may* be set if the *inner* header is UDP
794 */
758 if (ipv4_tunnel && 795 if (ipv4_tunnel &&
796 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
759 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 797 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
760 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
761 * it in the driver, hardware does not do it for us.
762 * Since L3L4P bit was set we assume a valid IHL value (>=5)
763 * so the total length of IPv4 header is IHL*4 bytes
764 */
765 skb->transport_header = skb->mac_header + 798 skb->transport_header = skb->mac_header +
766 sizeof(struct ethhdr) + 799 sizeof(struct ethhdr) +
767 (ip_hdr(skb)->ihl * 4); 800 (ip_hdr(skb)->ihl * 4);
@@ -778,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
778 (skb->len - skb_transport_offset(skb)), 811 (skb->len - skb_transport_offset(skb)),
779 IPPROTO_UDP, rx_udp_csum); 812 IPPROTO_UDP, rx_udp_csum);
780 813
781 if (udp_hdr(skb)->check != csum) { 814 if (udp_hdr(skb)->check != csum)
782 vsi->back->hw_csum_rx_error++; 815 goto checksum_fail;
783 return;
784 }
785 } 816 }
786 817
787 skb->ip_summed = CHECKSUM_UNNECESSARY; 818 skb->ip_summed = CHECKSUM_UNNECESSARY;
819
820 return;
821
822checksum_fail:
823 vsi->back->hw_csum_rx_error++;
788} 824}
789 825
790/** 826/**
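A minimal userspace sketch (not driver code) of the offset arithmetic the VXLAN comment above relies on: once L3L4P guarantees a sane IHL (>= 5), the inner transport header sits sizeof(struct ethhdr) + ihl * 4 bytes into the frame. The 14-byte Ethernet header length is the only assumption beyond the comment itself.

#include <stdio.h>

int main(void)
{
	const unsigned int eth_hlen = 14;	/* sizeof(struct ethhdr) */
	unsigned int ihl;

	/* IHL counts 32-bit words, so the IPv4 header is ihl * 4 bytes */
	for (ihl = 5; ihl <= 6; ihl++)
		printf("ihl=%u -> IPv4 header %u bytes, transport offset %u\n",
		       ihl, ihl * 4, eth_hlen + ihl * 4);
	return 0;	/* ihl=5 gives the classic 20-byte header, offset 34 */
}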
@@ -953,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
953 /* ERR_MASK will only have valid bits if EOP set */ 989 /* ERR_MASK will only have valid bits if EOP set */
954 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { 990 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
955 dev_kfree_skb_any(skb); 991 dev_kfree_skb_any(skb);
992 /* TODO: shouldn't we increment a counter indicating the
993 * drop?
994 */
956 goto next_desc; 995 goto next_desc;
957 } 996 }
958 997
@@ -1508,9 +1547,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
1508static int i40e_xmit_descriptor_count(struct sk_buff *skb, 1547static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1509 struct i40e_ring *tx_ring) 1548 struct i40e_ring *tx_ring)
1510{ 1549{
1511#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1512 unsigned int f; 1550 unsigned int f;
1513#endif
1514 int count = 0; 1551 int count = 0;
1515 1552
1516 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 1553 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -1519,12 +1556,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1519 * + 1 desc for context descriptor, 1556 * + 1 desc for context descriptor,
1520 * otherwise try next time 1557 * otherwise try next time
1521 */ 1558 */
1522#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1523 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 1559 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1524 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 1560 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
1525#else 1561
1526 count += skb_shinfo(skb)->nr_frags;
1527#endif
1528 count += TXD_USE_COUNT(skb_headlen(skb)); 1562 count += TXD_USE_COUNT(skb_headlen(skb));
1529 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { 1563 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1530 tx_ring->tx_stats.tx_busy++; 1564 tx_ring->tx_stats.tx_busy++;
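A worked count for the simplified i40e_xmit_descriptor_count() above, as a standalone sketch with a hypothetical skb layout; the macros are copied from i40e_txrx.h further down.

#include <stdio.h>

#define I40E_MAX_DATA_PER_TXD	8192
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

int main(void)
{
	/* hypothetical skb: 1500-byte linear area plus three frags */
	unsigned int headlen = 1500, frags[] = { 4096, 4096, 9000 };
	unsigned int f, count = TXD_USE_COUNT(headlen);	/* 1 */

	for (f = 0; f < 3; f++)
		count += TXD_USE_COUNT(frags[f]);	/* 1 + 1 + 2 */

	/* i40e_maybe_stop_tx() is asked for count + 4 (gap) + 1 (context) */
	printf("data descriptors %u, ring space needed %u\n",
	       count, count + 4 + 1);
	return 0;	/* 5 data descriptors, 10 slots needed */
}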
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 10bf49e18d7f..30d248bc5d19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -24,7 +27,7 @@
24#ifndef _I40E_TXRX_H_ 27#ifndef _I40E_TXRX_H_
25#define _I40E_TXRX_H_ 28#define _I40E_TXRX_H_
26 29
27/* Interrupt Throttling and Rate Limiting (storm control) Goodies */ 30/* Interrupt Throttling and Rate Limiting Goodies */
28 31
29#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ 32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
30#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ 33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -66,16 +69,11 @@ enum i40e_dyn_idx_t {
66 69
67/* Supported RSS offloads */ 70/* Supported RSS offloads */
68#define I40E_DEFAULT_RSS_HENA ( \ 71#define I40E_DEFAULT_RSS_HENA ( \
69 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
70 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
71 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
76 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 76 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
79 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
80 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ 78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
81 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 79 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -119,11 +117,11 @@ enum i40e_dyn_idx_t {
119#define i40e_rx_desc i40e_32byte_rx_desc 117#define i40e_rx_desc i40e_32byte_rx_desc
120 118
121#define I40E_MIN_TX_LEN 17 119#define I40E_MIN_TX_LEN 17
122#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */ 120#define I40E_MAX_DATA_PER_TXD 8192
123 121
124/* Tx Descriptors needed, worst case */ 122/* Tx Descriptors needed, worst case */
125#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) 123#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
126#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) 124#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
127 125
128#define I40E_TX_FLAGS_CSUM (u32)(1) 126#define I40E_TX_FLAGS_CSUM (u32)(1)
129#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) 127#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
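With I40E_MAX_DATA_PER_TXD lowered to a power of two, a 4 KiB page always fits in one descriptor, which is why DESC_NEEDED collapses to MAX_SKB_FRAGS + 4 and the PAGE_SIZE #if in i40e_xmit_descriptor_count() could go. A small sketch of that arithmetic (the 4096-byte page size is an assumption for illustration):

#include <stdio.h>

#define I40E_MAX_DATA_PER_TXD	8192
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int page = 4096;	/* assumed PAGE_SIZE */

	/* one 4 KiB page now always fits a single descriptor */
	printf("TXD_USE_COUNT(%u) = %u\n", page, TXD_USE_COUNT(page));
	/* larger buffers still split across descriptors as needed */
	printf("TXD_USE_COUNT(16384) = %u\n", TXD_USE_COUNT(16384u));
	return 0;
}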
@@ -180,7 +178,6 @@ enum i40e_ring_state_t {
180 __I40E_TX_DETECT_HANG, 178 __I40E_TX_DETECT_HANG,
181 __I40E_HANG_CHECK_ARMED, 179 __I40E_HANG_CHECK_ARMED,
182 __I40E_RX_PS_ENABLED, 180 __I40E_RX_PS_ENABLED,
183 __I40E_RX_LRO_ENABLED,
184 __I40E_RX_16BYTE_DESC_ENABLED, 181 __I40E_RX_16BYTE_DESC_ENABLED,
185}; 182};
186 183
@@ -196,12 +193,6 @@ enum i40e_ring_state_t {
196 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 193 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
197#define clear_check_for_tx_hang(ring) \ 194#define clear_check_for_tx_hang(ring) \
198 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 195 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
199#define ring_is_lro_enabled(ring) \
200 test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
201#define set_ring_lro_enabled(ring) \
202 set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
203#define clear_ring_lro_enabled(ring) \
204 clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
205#define ring_is_16byte_desc_enabled(ring) \ 196#define ring_is_16byte_desc_enabled(ring) \
206 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) 197 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
207#define set_ring_16byte_desc_enabled(ring) \ 198#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b3381edd..d3cf5a69de54 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -32,13 +35,11 @@
32#include "i40e_lan_hmc.h" 35#include "i40e_lan_hmc.h"
33 36
34/* Device IDs */ 37/* Device IDs */
35#define I40E_DEV_ID_SFP_XL710 0x1572 38#define I40E_DEV_ID_SFP_XL710 0x1572
36#define I40E_DEV_ID_SFP_X710 0x1573
37#define I40E_DEV_ID_QEMU 0x1574 39#define I40E_DEV_ID_QEMU 0x1574
38#define I40E_DEV_ID_KX_A 0x157F 40#define I40E_DEV_ID_KX_A 0x157F
39#define I40E_DEV_ID_KX_B 0x1580 41#define I40E_DEV_ID_KX_B 0x1580
40#define I40E_DEV_ID_KX_C 0x1581 42#define I40E_DEV_ID_KX_C 0x1581
41#define I40E_DEV_ID_KX_D 0x1582
42#define I40E_DEV_ID_QSFP_A 0x1583 43#define I40E_DEV_ID_QSFP_A 0x1583
43#define I40E_DEV_ID_QSFP_B 0x1584 44#define I40E_DEV_ID_QSFP_B 0x1584
44#define I40E_DEV_ID_QSFP_C 0x1585 45#define I40E_DEV_ID_QSFP_C 0x1585
@@ -57,8 +58,8 @@
57/* Max default timeout in ms, */ 58/* Max default timeout in ms, */
58#define I40E_MAX_NVM_TIMEOUT 18000 59#define I40E_MAX_NVM_TIMEOUT 18000
59 60
60/* Switch from mc to the 2usec global time (this is the GTIME resolution) */ 61/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
61#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2) 62#define I40E_MS_TO_GTIME(time) ((time) * 1000)
62 63
63/* forward declaration */ 64/* forward declaration */
64struct i40e_hw; 65struct i40e_hw;
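The comment fix above changes both the unit and the divisor; a two-line sketch comparing the old and new encodings of the same interval:

#include <stdio.h>

#define I40E_MS_TO_GTIME_OLD(time)	(((time) * 1000) / 2)	/* 2 usec ticks */
#define I40E_MS_TO_GTIME(time)		((time) * 1000)		/* 1 usec ticks */

int main(void)
{
	unsigned int ms = 100;

	/* both encode the same 100000 usec, just at different resolutions */
	printf("%u ms -> old reg value %u, new reg value %u\n",
	       ms, I40E_MS_TO_GTIME_OLD(ms), I40E_MS_TO_GTIME(ms));
	return 0;
}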
@@ -101,15 +102,6 @@ enum i40e_debug_mask {
101 I40E_DEBUG_ALL = 0xFFFFFFFF 102 I40E_DEBUG_ALL = 0xFFFFFFFF
102}; 103};
103 104
104/* PCI Bus Info */
105#define I40E_PCI_LINK_WIDTH_1 0x10
106#define I40E_PCI_LINK_WIDTH_2 0x20
107#define I40E_PCI_LINK_WIDTH_4 0x40
108#define I40E_PCI_LINK_WIDTH_8 0x80
109#define I40E_PCI_LINK_SPEED_2500 0x1
110#define I40E_PCI_LINK_SPEED_5000 0x2
111#define I40E_PCI_LINK_SPEED_8000 0x3
112
113/* These are structs for managing the hardware information and the operations. 105/* These are structs for managing the hardware information and the operations.
114 * The structures of function pointers are filled out at init time when we 106 * The structures of function pointers are filled out at init time when we
115 * know for sure exactly which hardware we're working with. This gives us the 107 * know for sure exactly which hardware we're working with. This gives us the
@@ -173,6 +165,9 @@ struct i40e_link_status {
173 u8 loopback; 165 u8 loopback;
174 /* is Link Status Event notification to SW enabled */ 166 /* is Link Status Event notification to SW enabled */
175 bool lse_enable; 167 bool lse_enable;
168 u16 max_frame_size;
169 bool crc_enable;
170 u8 pacing;
176}; 171};
177 172
178struct i40e_phy_info { 173struct i40e_phy_info {
@@ -415,6 +410,7 @@ struct i40e_driver_version {
415 u8 minor_version; 410 u8 minor_version;
416 u8 build_version; 411 u8 build_version;
417 u8 subbuild_version; 412 u8 subbuild_version;
413 u8 driver_string[32];
418}; 414};
419 415
420/* RX Descriptors */ 416/* RX Descriptors */
@@ -494,9 +490,6 @@ union i40e_32byte_rx_desc {
494 } wb; /* writeback */ 490 } wb; /* writeback */
495}; 491};
496 492
497#define I40E_RXD_QW1_STATUS_SHIFT 0
498#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
499
500enum i40e_rx_desc_status_bits { 493enum i40e_rx_desc_status_bits {
501 /* Note: These are predefined bit offsets */ 494 /* Note: These are predefined bit offsets */
502 I40E_RX_DESC_STATUS_DD_SHIFT = 0, 495 I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -513,9 +506,14 @@ enum i40e_rx_desc_status_bits {
513 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, 506 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
514 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, 507 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
515 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ 508 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
516 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18 509 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
510 I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
517}; 511};
518 512
513#define I40E_RXD_QW1_STATUS_SHIFT 0
514#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
515 << I40E_RXD_QW1_STATUS_SHIFT)
516
519#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT 517#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
520#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ 518#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
521 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) 519 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
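Deriving the mask from the sentinel keeps it in step with the enum: with UDP_0 at bit 18, I40E_RX_DESC_STATUS_LAST evaluates to 19 and the mask grows to cover bits 0-18, where the old hard-coded 0x7FFF stopped at bit 14. A standalone sketch of the arithmetic:

#include <stdio.h>

#define I40E_RX_DESC_STATUS_LAST	19	/* value of the sentinel above */
#define I40E_RXD_QW1_STATUS_SHIFT	0
#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
					 << I40E_RXD_QW1_STATUS_SHIFT)

int main(void)
{
	printf("new mask 0x%x (bits 0-18, includes UDP_0 at bit 18)\n",
	       I40E_RXD_QW1_STATUS_MASK);	/* 0x7ffff */
	printf("old mask 0x%x (bits 0-14, missed IPV6EXADD and UDP_0)\n",
	       0x7fff);
	return 0;
}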
@@ -543,7 +541,8 @@ enum i40e_rx_desc_error_bits {
543 I40E_RX_DESC_ERROR_IPE_SHIFT = 3, 541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
544 I40E_RX_DESC_ERROR_L4E_SHIFT = 4, 542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
545 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, 543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
546 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6 544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
545 I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
547}; 546};
548 547
549enum i40e_rx_desc_error_l3l4e_fcoe_masks { 548enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -664,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
664 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, 663 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
665 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ 664 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
666 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ 665 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
667 I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
668 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, 666 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
669 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, 667 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
670 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, 668 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -868,18 +866,14 @@ struct i40e_filter_program_desc {
868 866
869/* Packet Classifier Types for filters */ 867/* Packet Classifier Types for filters */
870enum i40e_filter_pctype { 868enum i40e_filter_pctype {
871 /* Note: Values 0-28 are reserved for future use */ 869 /* Note: Values 0-30 are reserved for future use */
872 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
873 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
874 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, 870 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
875 I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32, 871 /* Note: Value 32 is reserved for future use */
876 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, 872 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
877 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, 873 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
878 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, 874 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
879 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, 875 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
880 /* Note: Values 37-38 are reserved for future use */ 876 /* Note: Values 37-40 are reserved for future use */
881 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
882 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
883 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, 877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
884 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42, 878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
885 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, 879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -961,6 +955,16 @@ struct i40e_vsi_context {
961 struct i40e_aqc_vsi_properties_data info; 955 struct i40e_aqc_vsi_properties_data info;
962}; 956};
963 957
958struct i40e_veb_context {
959 u16 seid;
960 u16 uplink_seid;
961 u16 veb_number;
962 u16 vebs_allocated;
963 u16 vebs_unallocated;
964 u16 flags;
965 struct i40e_aqc_get_veb_parameters_completion info;
966};
967
964/* Statistics collected by each port, VSI, VEB, and S-channel */ 968/* Statistics collected by each port, VSI, VEB, and S-channel */
965struct i40e_eth_stats { 969struct i40e_eth_stats {
966 u64 rx_bytes; /* gorc */ 970 u64 rx_bytes; /* gorc */
@@ -968,8 +972,6 @@ struct i40e_eth_stats {
968 u64 rx_multicast; /* mprc */ 972 u64 rx_multicast; /* mprc */
969 u64 rx_broadcast; /* bprc */ 973 u64 rx_broadcast; /* bprc */
970 u64 rx_discards; /* rdpc */ 974 u64 rx_discards; /* rdpc */
971 u64 rx_errors; /* repc */
972 u64 rx_missed; /* rmpc */
973 u64 rx_unknown_protocol; /* rupp */ 975 u64 rx_unknown_protocol; /* rupp */
974 u64 tx_bytes; /* gotc */ 976 u64 tx_bytes; /* gotc */
975 u64 tx_unicast; /* uptc */ 977 u64 tx_unicast; /* uptc */
@@ -1021,9 +1023,12 @@ struct i40e_hw_port_stats {
1021 u64 tx_size_big; /* ptc9522 */ 1023 u64 tx_size_big; /* ptc9522 */
1022 u64 mac_short_packet_dropped; /* mspdc */ 1024 u64 mac_short_packet_dropped; /* mspdc */
1023 u64 checksum_error; /* xec */ 1025 u64 checksum_error; /* xec */
1026 /* flow director stats */
1027 u64 fd_atr_match;
1028 u64 fd_sb_match;
1024 /* EEE LPI */ 1029 /* EEE LPI */
1025 bool tx_lpi_status; 1030 u32 tx_lpi_status;
1026 bool rx_lpi_status; 1031 u32 rx_lpi_status;
1027 u64 tx_lpi_count; /* etlpic */ 1032 u64 tx_lpi_count; /* etlpic */
1028 u64 rx_lpi_count; /* erlpic */ 1033 u64 rx_lpi_count; /* erlpic */
1029}; 1034};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index ccf45d04b7ef..cd18d5689006 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -338,10 +341,6 @@ struct i40e_virtchnl_pf_event {
338 int severity; 341 int severity;
339}; 342};
340 343
341/* The following are TBD, not necessary for LAN functionality.
342 * I40E_VIRTCHNL_OP_FCOE
343 */
344
345/* VF reset states - these are written into the RSTAT register: 344/* VF reset states - these are written into the RSTAT register:
346 * I40E_VFGEN_RSTAT1 on the PF 345 * I40E_VFGEN_RSTAT1 on the PF
347 * I40E_VFGEN_RSTAT on the VF 346 * I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 807807d62387..30ef519d4b91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -77,7 +80,7 @@ struct i40e_vsi {
77#define I40EVF_MIN_TXD 64 80#define I40EVF_MIN_TXD 64
78#define I40EVF_MAX_RXD 4096 81#define I40EVF_MAX_RXD 4096
79#define I40EVF_MIN_RXD 64 82#define I40EVF_MIN_RXD 64
80#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8 83#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
81 84
82/* Supported Rx Buffer Sizes */ 85/* Supported Rx Buffer Sizes */
83#define I40EVF_RXBUFFER_64 64 /* Used for packet split */ 86#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
@@ -193,10 +196,12 @@ struct i40evf_adapter {
193 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; 196 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
194 u32 tx_timeout_count; 197 u32 tx_timeout_count;
195 struct list_head mac_filter_list; 198 struct list_head mac_filter_list;
199 u32 tx_desc_count;
196 200
197 /* RX */ 201 /* RX */
198 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; 202 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
199 u64 hw_csum_rx_error; 203 u64 hw_csum_rx_error;
204 u32 rx_desc_count;
200 int num_msix_vectors; 205 int num_msix_vectors;
201 struct msix_entry *msix_entries; 206 struct msix_entry *msix_entries;
202 207
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1ce179c..60407a9df0c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -44,8 +47,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
44 I40EVF_STAT("rx_multicast", current_stats.rx_multicast), 47 I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
45 I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), 48 I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
46 I40EVF_STAT("rx_discards", current_stats.rx_discards), 49 I40EVF_STAT("rx_discards", current_stats.rx_discards),
47 I40EVF_STAT("rx_errors", current_stats.rx_errors),
48 I40EVF_STAT("rx_missed", current_stats.rx_missed),
49 I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), 50 I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
50 I40EVF_STAT("tx_bytes", current_stats.tx_bytes), 51 I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
51 I40EVF_STAT("tx_unicast", current_stats.tx_unicast), 52 I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
@@ -56,10 +57,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
56}; 57};
57 58
58#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) 59#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
59#define I40EVF_QUEUE_STATS_LEN \ 60#define I40EVF_QUEUE_STATS_LEN(_dev) \
60 (((struct i40evf_adapter *) \ 61 (((struct i40evf_adapter *) \
61 netdev_priv(netdev))->vsi_res->num_queue_pairs * 4) 62 netdev_priv(_dev))->vsi_res->num_queue_pairs \
62#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN) 63 * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
64#define I40EVF_STATS_LEN(_dev) \
65 (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
63 66
64/** 67/**
65 * i40evf_get_settings - Get Link Speed and Duplex settings 68 * i40evf_get_settings - Get Link Speed and Duplex settings
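The reworked I40EVF_QUEUE_STATS_LEN counts u64 fields per queue instead of hard-coding 4; a sketch of the count, assuming the usual two-field (packets/bytes) struct i40e_queue_stats layout and a hypothetical 4-queue-pair VF:

#include <stdio.h>

/* assumed layout of struct i40e_queue_stats: two u64 counters */
struct i40e_queue_stats {
	unsigned long long packets;
	unsigned long long bytes;
};

int main(void)
{
	unsigned int num_queue_pairs = 4;	/* hypothetical VF */
	unsigned int fields = sizeof(struct i40e_queue_stats) /
			      sizeof(unsigned long long);

	/* 2 rings (Tx and Rx) per queue pair, 'fields' stats per ring */
	printf("queue stat entries: %u\n", num_queue_pairs * 2 * fields);
	return 0;	/* 4 * 2 * 2 = 16 */
}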
@@ -75,7 +78,7 @@ static int i40evf_get_settings(struct net_device *netdev,
75 /* In the future the VF will be able to query the PF for 78 /* In the future the VF will be able to query the PF for
76 * some information - for now use a dummy value 79 * some information - for now use a dummy value
77 */ 80 */
78 ecmd->supported = SUPPORTED_10000baseT_Full; 81 ecmd->supported = 0;
79 ecmd->autoneg = AUTONEG_DISABLE; 82 ecmd->autoneg = AUTONEG_DISABLE;
80 ecmd->transceiver = XCVR_DUMMY1; 83 ecmd->transceiver = XCVR_DUMMY1;
81 ecmd->port = PORT_NONE; 84 ecmd->port = PORT_NONE;
@@ -94,9 +97,9 @@ static int i40evf_get_settings(struct net_device *netdev,
94static int i40evf_get_sset_count(struct net_device *netdev, int sset) 97static int i40evf_get_sset_count(struct net_device *netdev, int sset)
95{ 98{
96 if (sset == ETH_SS_STATS) 99 if (sset == ETH_SS_STATS)
97 return I40EVF_STATS_LEN; 100 return I40EVF_STATS_LEN(netdev);
98 else 101 else
99 return -ENOTSUPP; 102 return -EINVAL;
100} 103}
101 104
102/** 105/**
@@ -219,13 +222,11 @@ static void i40evf_get_ringparam(struct net_device *netdev,
219 struct ethtool_ringparam *ring) 222 struct ethtool_ringparam *ring)
220{ 223{
221 struct i40evf_adapter *adapter = netdev_priv(netdev); 224 struct i40evf_adapter *adapter = netdev_priv(netdev);
222 struct i40e_ring *tx_ring = adapter->tx_rings[0];
223 struct i40e_ring *rx_ring = adapter->rx_rings[0];
224 225
225 ring->rx_max_pending = I40EVF_MAX_RXD; 226 ring->rx_max_pending = I40EVF_MAX_RXD;
226 ring->tx_max_pending = I40EVF_MAX_TXD; 227 ring->tx_max_pending = I40EVF_MAX_TXD;
227 ring->rx_pending = rx_ring->count; 228 ring->rx_pending = adapter->rx_desc_count;
228 ring->tx_pending = tx_ring->count; 229 ring->tx_pending = adapter->tx_desc_count;
229} 230}
230 231
231/** 232/**
@@ -241,7 +242,6 @@ static int i40evf_set_ringparam(struct net_device *netdev,
241{ 242{
242 struct i40evf_adapter *adapter = netdev_priv(netdev); 243 struct i40evf_adapter *adapter = netdev_priv(netdev);
243 u32 new_rx_count, new_tx_count; 244 u32 new_rx_count, new_tx_count;
244 int i;
245 245
246 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 246 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
247 return -EINVAL; 247 return -EINVAL;
@@ -257,17 +257,16 @@ static int i40evf_set_ringparam(struct net_device *netdev,
257 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); 257 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
258 258
259 /* if nothing to do return success */ 259 /* if nothing to do return success */
260 if ((new_tx_count == adapter->tx_rings[0]->count) && 260 if ((new_tx_count == adapter->tx_desc_count) &&
261 (new_rx_count == adapter->rx_rings[0]->count)) 261 (new_rx_count == adapter->rx_desc_count))
262 return 0; 262 return 0;
263 263
264 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { 264 adapter->tx_desc_count = new_tx_count;
265 adapter->tx_rings[0]->count = new_tx_count; 265 adapter->rx_desc_count = new_rx_count;
266 adapter->rx_rings[0]->count = new_rx_count;
267 }
268 266
269 if (netif_running(netdev)) 267 if (netif_running(netdev))
270 i40evf_reinit_locked(adapter); 268 i40evf_reinit_locked(adapter);
269
271 return 0; 270 return 0;
272} 271}
273 272
@@ -290,14 +289,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
290 ec->rx_max_coalesced_frames = vsi->work_limit; 289 ec->rx_max_coalesced_frames = vsi->work_limit;
291 290
292 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) 291 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
293 ec->rx_coalesce_usecs = 1; 292 ec->use_adaptive_rx_coalesce = 1;
294 else
295 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
296 293
297 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) 294 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
298 ec->tx_coalesce_usecs = 1; 295 ec->use_adaptive_tx_coalesce = 1;
299 else 296
300 ec->tx_coalesce_usecs = vsi->tx_itr_setting; 297 ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
298 ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
301 299
302 return 0; 300 return 0;
303} 301}
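The (I40E_MIN_ITR << 1) .. (I40E_MAX_ITR << 1) bounds checked in i40evf_set_coalesce() below come straight from the register's 2-usec tick; a sketch of the resulting valid range in microseconds:

#include <stdio.h>

#define I40E_MAX_ITR	0x0FF0	/* reg uses 2 usec resolution */
#define I40E_MIN_ITR	0x0004	/* reg uses 2 usec resolution */

int main(void)
{
	/* ethtool speaks microseconds; the register counts 2 usec ticks,
	 * so the usec bounds are the register bounds shifted left once */
	printf("valid coalesce range: %u..%u usec\n",
	       I40E_MIN_ITR << 1, I40E_MAX_ITR << 1);	/* 8..8160 */
	return 0;
}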
@@ -318,54 +316,361 @@ static int i40evf_set_coalesce(struct net_device *netdev,
318 struct i40e_q_vector *q_vector; 316 struct i40e_q_vector *q_vector;
319 int i; 317 int i;
320 318
321 if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames) 319 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
322 vsi->work_limit = ec->tx_max_coalesced_frames; 320 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
321
322 if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
323 (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
324 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
325
326 else
327 return -EINVAL;
328
329 if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
330 (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
331 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
332 else if (ec->use_adaptive_tx_coalesce)
333 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
334 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
335 else
336 return -EINVAL;
337
338 if (ec->use_adaptive_rx_coalesce)
339 vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
340 else
341 vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
342
343 if (ec->use_adaptive_tx_coalesce)
344 vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
345 else
346 vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
323 347
324 switch (ec->rx_coalesce_usecs) { 348 for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
325 case 0: 349 q_vector = adapter->q_vector[i];
326 vsi->rx_itr_setting = 0; 350 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
351 wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
352 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
353 wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
354 i40e_flush(hw);
355 }
356
357 return 0;
358}
359
360/**
361 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
362 * @adapter: board private structure
363 * @cmd: ethtool rxnfc command
364 *
365 * Returns Success if the flow is supported, else Invalid Input.
366 **/
367static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
368 struct ethtool_rxnfc *cmd)
369{
370 struct i40e_hw *hw = &adapter->hw;
371 u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
372 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
373
374 /* We always hash on IP src and dest addresses */
375 cmd->data = RXH_IP_SRC | RXH_IP_DST;
376
377 switch (cmd->flow_type) {
378 case TCP_V4_FLOW:
379 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
380 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
327 break; 381 break;
328 case 1: 382 case UDP_V4_FLOW:
329 vsi->rx_itr_setting = (I40E_ITR_DYNAMIC 383 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
330 | ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); 384 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
331 break; 385 break;
332 default: 386
333 if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || 387 case SCTP_V4_FLOW:
334 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) 388 case AH_ESP_V4_FLOW:
335 return -EINVAL; 389 case AH_V4_FLOW:
336 vsi->rx_itr_setting = ec->rx_coalesce_usecs; 390 case ESP_V4_FLOW:
391 case IPV4_FLOW:
392 break;
393
394 case TCP_V6_FLOW:
395 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
396 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
337 break; 397 break;
398 case UDP_V6_FLOW:
399 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
400 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
401 break;
402
403 case SCTP_V6_FLOW:
404 case AH_ESP_V6_FLOW:
405 case AH_V6_FLOW:
406 case ESP_V6_FLOW:
407 case IPV6_FLOW:
408 break;
409 default:
410 cmd->data = 0;
411 return -EINVAL;
338 } 412 }
339 413
340 switch (ec->tx_coalesce_usecs) { 414 return 0;
341 case 0: 415}
342 vsi->tx_itr_setting = 0; 416
417/**
418 * i40evf_get_rxnfc - command to get RX flow classification rules
419 * @netdev: network interface device structure
420 * @cmd: ethtool rxnfc command
421 *
422 * Returns Success if the command is supported.
423 **/
424static int i40evf_get_rxnfc(struct net_device *netdev,
425 struct ethtool_rxnfc *cmd,
426 u32 *rule_locs)
427{
428 struct i40evf_adapter *adapter = netdev_priv(netdev);
429 int ret = -EOPNOTSUPP;
430
431 switch (cmd->cmd) {
432 case ETHTOOL_GRXRINGS:
433 cmd->data = adapter->vsi_res->num_queue_pairs;
434 ret = 0;
343 break; 435 break;
344 case 1: 436 case ETHTOOL_GRXFH:
345 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC 437 ret = i40evf_get_rss_hash_opts(adapter, cmd);
346 | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
347 break; 438 break;
348 default: 439 default:
349 if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || 440 break;
350 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) 441 }
442
443 return ret;
444}
445
446/**
447 * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
448 * @adapter: board private structure
449 * @cmd: ethtool rxnfc command
450 *
451 * Returns Success if the flow input set is supported.
452 **/
453static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
454 struct ethtool_rxnfc *nfc)
455{
456 struct i40e_hw *hw = &adapter->hw;
457
458 u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
459 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
460
461 /* RSS does not support anything other than hashing
462 * to queues on src and dst IPs and ports
463 */
464 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
465 RXH_L4_B_0_1 | RXH_L4_B_2_3))
466 return -EINVAL;
467
468 /* We need at least the IP SRC and DEST fields for hashing */
469 if (!(nfc->data & RXH_IP_SRC) ||
470 !(nfc->data & RXH_IP_DST))
471 return -EINVAL;
472
473 switch (nfc->flow_type) {
474 case TCP_V4_FLOW:
475 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
476 case 0:
477 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
478 break;
479 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
480 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
481 break;
482 default:
351 return -EINVAL; 483 return -EINVAL;
352 vsi->tx_itr_setting = ec->tx_coalesce_usecs; 484 }
485 break;
486 case TCP_V6_FLOW:
487 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
488 case 0:
489 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
490 break;
491 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
492 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
493 break;
494 default:
495 return -EINVAL;
496 }
497 break;
498 case UDP_V4_FLOW:
499 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
500 case 0:
501 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
502 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
503 break;
504 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
505 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
506 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
507 break;
508 default:
509 return -EINVAL;
510 }
353 break; 511 break;
512 case UDP_V6_FLOW:
513 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
514 case 0:
515 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
516 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
517 break;
518 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
519 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
520 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
521 break;
522 default:
523 return -EINVAL;
524 }
525 break;
526 case AH_ESP_V4_FLOW:
527 case AH_V4_FLOW:
528 case ESP_V4_FLOW:
529 case SCTP_V4_FLOW:
530 if ((nfc->data & RXH_L4_B_0_1) ||
531 (nfc->data & RXH_L4_B_2_3))
532 return -EINVAL;
533 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
534 break;
535 case AH_ESP_V6_FLOW:
536 case AH_V6_FLOW:
537 case ESP_V6_FLOW:
538 case SCTP_V6_FLOW:
539 if ((nfc->data & RXH_L4_B_0_1) ||
540 (nfc->data & RXH_L4_B_2_3))
541 return -EINVAL;
542 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
543 break;
544 case IPV4_FLOW:
545 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
546 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
547 break;
548 case IPV6_FLOW:
549 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
550 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
551 break;
552 default:
553 return -EINVAL;
354 } 554 }
355 555
356 for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) { 556 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
357 q_vector = adapter->q_vector[i]; 557 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
358 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 558 i40e_flush(hw);
359 wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr); 559
360 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 560 return 0;
361 wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr); 561}
362 i40e_flush(hw); 562
563/**
564 * i40evf_set_rxnfc - command to set RX flow classification rules
565 * @netdev: network interface device structure
566 * @cmd: ethtool rxnfc command
567 *
568 * Returns Success if the command is supported.
569 **/
570static int i40evf_set_rxnfc(struct net_device *netdev,
571 struct ethtool_rxnfc *cmd)
572{
573 struct i40evf_adapter *adapter = netdev_priv(netdev);
574 int ret = -EOPNOTSUPP;
575
576 switch (cmd->cmd) {
577 case ETHTOOL_SRXFH:
578 ret = i40evf_set_rss_hash_opt(adapter, cmd);
579 break;
580 default:
581 break;
582 }
583
584 return ret;
585}
586
587/**
588 * i40evf_get_channels: get the number of channels supported by the device
589 * @netdev: network interface device structure
590 * @ch: channel information structure
591 *
592 * For the purposes of our device, we only use combined channels, i.e. a tx/rx
593 * queue pair. Report one extra channel to match our "other" MSI-X vector.
594 **/
595static void i40evf_get_channels(struct net_device *netdev,
596 struct ethtool_channels *ch)
597{
598 struct i40evf_adapter *adapter = netdev_priv(netdev);
599
600 /* Report maximum channels */
601 ch->max_combined = adapter->vsi_res->num_queue_pairs;
602
603 ch->max_other = NONQ_VECS;
604 ch->other_count = NONQ_VECS;
605
606 ch->combined_count = adapter->vsi_res->num_queue_pairs;
607}
608
609/**
610 * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
611 * @netdev: network interface device structure
612 *
613 * Returns the table size.
614 **/
615static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
616{
617 return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
618}
619
620/**
621 * i40evf_get_rxfh - get the rx flow hash indirection table
622 * @netdev: network interface device structure
623 * @indir: indirection table
624 * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
625 *
626 * Reads the indirection table directly from the hardware. Always returns 0.
627 **/
628static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
629{
630 struct i40evf_adapter *adapter = netdev_priv(netdev);
631 struct i40e_hw *hw = &adapter->hw;
632 u32 hlut_val;
633 int i, j;
634
 635 for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
636 hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
637 indir[j++] = hlut_val & 0xff;
638 indir[j++] = (hlut_val >> 8) & 0xff;
639 indir[j++] = (hlut_val >> 16) & 0xff;
640 indir[j++] = (hlut_val >> 24) & 0xff;
641 }
642 return 0;
643}
644
645/**
646 * i40evf_set_rxfh - set the rx flow hash indirection table
647 * @netdev: network interface device structure
648 * @indir: indirection table
649 * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
650 *
 651 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
652 * returns 0 after programming the table.
653 **/
654static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
655 const u8 *key)
656{
657 struct i40evf_adapter *adapter = netdev_priv(netdev);
658 struct i40e_hw *hw = &adapter->hw;
659 u32 hlut_val;
660 int i, j;
661
662 for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
663 hlut_val = indir[j++];
664 hlut_val |= indir[j++] << 8;
665 hlut_val |= indir[j++] << 16;
666 hlut_val |= indir[j++] << 24;
667 wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
363 } 668 }
364 669
365 return 0; 670 return 0;
366} 671}
367 672
368static struct ethtool_ops i40evf_ethtool_ops = { 673static const struct ethtool_ops i40evf_ethtool_ops = {
369 .get_settings = i40evf_get_settings, 674 .get_settings = i40evf_get_settings,
370 .get_drvinfo = i40evf_get_drvinfo, 675 .get_drvinfo = i40evf_get_drvinfo,
371 .get_link = ethtool_op_get_link, 676 .get_link = ethtool_op_get_link,
@@ -378,6 +683,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
378 .set_msglevel = i40evf_set_msglevel, 683 .set_msglevel = i40evf_set_msglevel,
379 .get_coalesce = i40evf_get_coalesce, 684 .get_coalesce = i40evf_get_coalesce,
380 .set_coalesce = i40evf_set_coalesce, 685 .set_coalesce = i40evf_set_coalesce,
686 .get_rxnfc = i40evf_get_rxnfc,
687 .set_rxnfc = i40evf_set_rxnfc,
688 .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
689 .get_rxfh = i40evf_get_rxfh,
690 .set_rxfh = i40evf_set_rxfh,
691 .get_channels = i40evf_get_channels,
381}; 692};
382 693
383/** 694/**
@@ -389,5 +700,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
389 **/ 700 **/
390void i40evf_set_ethtool_ops(struct net_device *netdev) 701void i40evf_set_ethtool_ops(struct net_device *netdev)
391{ 702{
392 SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops); 703 netdev->ethtool_ops = &i40evf_ethtool_ops;
393} 704}
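The get/set rxfh pair above shuttles four one-byte table entries through each 32-bit I40E_VFQF_HLUT register, low byte first. A standalone sketch of that packing with hypothetical queue indices:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t indir[4] = { 0, 1, 2, 3 };	/* hypothetical queue indices */
	uint32_t hlut_val;
	int shift;

	/* pack the way i40evf_set_rxfh() does before the wr32() */
	hlut_val = indir[0] | indir[1] << 8 | indir[2] << 16 | indir[3] << 24;
	printf("packed register value 0x%08x\n", hlut_val);	/* 0x03020100 */

	/* unpack the way i40evf_get_rxfh() does after the rd32() */
	for (shift = 0; shift < 32; shift += 8)
		printf("entry %u\n", (hlut_val >> shift) & 0xff);
	return 0;
}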
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548fde0d..7fc5f3b5d6bf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -25,13 +28,15 @@
25#include "i40e_prototype.h" 28#include "i40e_prototype.h"
26static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); 29static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
27static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); 30static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
31static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
32static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
28static int i40evf_close(struct net_device *netdev); 33static int i40evf_close(struct net_device *netdev);
29 34
30char i40evf_driver_name[] = "i40evf"; 35char i40evf_driver_name[] = "i40evf";
31static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
32 "Intel(R) XL710 X710 Virtual Function Network Driver"; 37 "Intel(R) XL710 X710 Virtual Function Network Driver";
33 38
34#define DRV_VERSION "0.9.16" 39#define DRV_VERSION "0.9.34"
35const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
36static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
37 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -167,7 +172,6 @@ static void i40evf_tx_timeout(struct net_device *netdev)
167 struct i40evf_adapter *adapter = netdev_priv(netdev); 172 struct i40evf_adapter *adapter = netdev_priv(netdev);
168 173
169 adapter->tx_timeout_count++; 174 adapter->tx_timeout_count++;
170 dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
171 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { 175 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
172 adapter->flags |= I40EVF_FLAG_RESET_NEEDED; 176 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
173 schedule_work(&adapter->reset_task); 177 schedule_work(&adapter->reset_task);
@@ -657,12 +661,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
657 f = i40evf_find_vlan(adapter, vlan); 661 f = i40evf_find_vlan(adapter, vlan);
658 if (NULL == f) { 662 if (NULL == f) {
659 f = kzalloc(sizeof(*f), GFP_ATOMIC); 663 f = kzalloc(sizeof(*f), GFP_ATOMIC);
660 if (NULL == f) { 664 if (NULL == f)
661 dev_info(&adapter->pdev->dev,
662 "%s: no memory for new VLAN filter\n",
663 __func__);
664 return NULL; 665 return NULL;
665 } 666
666 f->vlan = vlan; 667 f->vlan = vlan;
667 668
668 INIT_LIST_HEAD(&f->list); 669 INIT_LIST_HEAD(&f->list);
@@ -688,7 +689,6 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
688 f->remove = true; 689 f->remove = true;
689 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 690 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
690 } 691 }
691 return;
692} 692}
693 693
694/** 694/**
@@ -767,14 +767,12 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
767 if (NULL == f) { 767 if (NULL == f) {
768 f = kzalloc(sizeof(*f), GFP_ATOMIC); 768 f = kzalloc(sizeof(*f), GFP_ATOMIC);
769 if (NULL == f) { 769 if (NULL == f) {
770 dev_info(&adapter->pdev->dev,
771 "%s: no memory for new filter\n", __func__);
772 clear_bit(__I40EVF_IN_CRITICAL_TASK, 770 clear_bit(__I40EVF_IN_CRITICAL_TASK,
773 &adapter->crit_section); 771 &adapter->crit_section);
774 return NULL; 772 return NULL;
775 } 773 }
776 774
777 memcpy(f->macaddr, macaddr, ETH_ALEN); 775 ether_addr_copy(f->macaddr, macaddr);
778 776
779 list_add(&f->list, &adapter->mac_filter_list); 777 list_add(&f->list, &adapter->mac_filter_list);
780 f->add = true; 778 f->add = true;
@@ -807,9 +805,8 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
807 805
808 f = i40evf_add_filter(adapter, addr->sa_data); 806 f = i40evf_add_filter(adapter, addr->sa_data);
809 if (f) { 807 if (f) {
810 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 808 ether_addr_copy(hw->mac.addr, addr->sa_data);
811 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 809 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
812 netdev->addr_len);
813 } 810 }
814 811
815 return (f == NULL) ? -ENOMEM : 0; 812 return (f == NULL) ? -ENOMEM : 0;
@@ -841,7 +838,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
841 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 838 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
842 bool found = false; 839 bool found = false;
843 840
844 if (f->macaddr[0] & 0x01) { 841 if (is_multicast_ether_addr(f->macaddr)) {
845 netdev_for_each_mc_addr(mca, netdev) { 842 netdev_for_each_mc_addr(mca, netdev) {
846 if (ether_addr_equal(mca->addr, f->macaddr)) { 843 if (ether_addr_equal(mca->addr, f->macaddr)) {
847 found = true; 844 found = true;
@@ -970,6 +967,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
970 struct net_device *netdev = adapter->netdev; 967 struct net_device *netdev = adapter->netdev;
971 struct i40evf_mac_filter *f; 968 struct i40evf_mac_filter *f;
972 969
970 if (adapter->state == __I40EVF_DOWN)
971 return;
972
973 /* remove all MAC filters */ 973 /* remove all MAC filters */
974 list_for_each_entry(f, &adapter->mac_filter_list, list) { 974 list_for_each_entry(f, &adapter->mac_filter_list, list) {
975 f->remove = true; 975 f->remove = true;
@@ -1027,30 +1027,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
1027 * Right now, we simply care about how many we'll get; we'll 1027 * Right now, we simply care about how many we'll get; we'll
1028 * set them up later while requesting irq's. 1028 * set them up later while requesting irq's.
1029 */ 1029 */
1030 while (vectors >= vector_threshold) { 1030 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1031 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1031 vector_threshold, vectors);
1032 vectors); 1032 if (err < 0) {
1033 if (!err) /* Success in acquiring all requested vectors. */ 1033 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1034 break;
1035 else if (err < 0)
1036 vectors = 0; /* Nasty failure, quit now */
1037 else /* err == number of vectors we should try again with */
1038 vectors = err;
1039 }
1040
1041 if (vectors < vector_threshold) {
1042 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
1043 kfree(adapter->msix_entries); 1034 kfree(adapter->msix_entries);
1044 adapter->msix_entries = NULL; 1035 adapter->msix_entries = NULL;
1045 err = -EIO; 1036 return err;
1046 } else {
1047 /* Adjust for only the vectors we'll use, which is minimum
1048 * of max_msix_q_vectors + NONQ_VECS, or the number of
1049 * vectors we were allocated.
1050 */
1051 adapter->num_msix_vectors = vectors;
1052 } 1037 }
1053 return err; 1038
1039 /* Adjust for only the vectors we'll use, which is minimum
1040 * of max_msix_q_vectors + NONQ_VECS, or the number of
1041 * vectors we were allocated.
1042 */
1043 adapter->num_msix_vectors = err;
1044 return 0;
1054} 1045}
1055 1046
1056/** 1047/**
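pci_enable_msix_range() folds the old try-smaller retry loop into one call: it returns a vector count somewhere in [minvec, maxvec] or a negative errno. A userspace model of that contract (fake_enable_msix_range() is a made-up stub, not the kernel API):

#include <errno.h>
#include <stdio.h>

/* stand-in for pci_enable_msix_range(); pretends the PF grants 6 vectors */
static int fake_enable_msix_range(int minvec, int maxvec)
{
	int available = 6;

	if (available < minvec)
		return -ENOSPC;	/* can't meet the caller's minimum */
	return available < maxvec ? available : maxvec;
}

int main(void)
{
	int err = fake_enable_msix_range(2, 9);

	if (err < 0)
		printf("MSI-X allocation failed: %d\n", err);
	else
		printf("num_msix_vectors = %d\n", err);	/* 6 */
	return 0;
}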
@@ -1096,14 +1087,14 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
1096 tx_ring->queue_index = i; 1087 tx_ring->queue_index = i;
1097 tx_ring->netdev = adapter->netdev; 1088 tx_ring->netdev = adapter->netdev;
1098 tx_ring->dev = &adapter->pdev->dev; 1089 tx_ring->dev = &adapter->pdev->dev;
1099 tx_ring->count = I40EVF_DEFAULT_TXD; 1090 tx_ring->count = adapter->tx_desc_count;
1100 adapter->tx_rings[i] = tx_ring; 1091 adapter->tx_rings[i] = tx_ring;
1101 1092
1102 rx_ring = &tx_ring[1]; 1093 rx_ring = &tx_ring[1];
1103 rx_ring->queue_index = i; 1094 rx_ring->queue_index = i;
1104 rx_ring->netdev = adapter->netdev; 1095 rx_ring->netdev = adapter->netdev;
1105 rx_ring->dev = &adapter->pdev->dev; 1096 rx_ring->dev = &adapter->pdev->dev;
1106 rx_ring->count = I40EVF_DEFAULT_RXD; 1097 rx_ring->count = adapter->rx_desc_count;
1107 adapter->rx_rings[i] = rx_ring; 1098 adapter->rx_rings[i] = rx_ring;
1108 } 1099 }
1109 1100
@@ -1141,9 +1132,6 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1141 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; 1132 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1142 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); 1133 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1143 1134
1144 /* A failure in MSI-X entry allocation isn't fatal, but it does
1145 * mean we disable MSI-X capabilities of the adapter.
1146 */
1147 adapter->msix_entries = kcalloc(v_budget, 1135 adapter->msix_entries = kcalloc(v_budget,
1148 sizeof(struct msix_entry), GFP_KERNEL); 1136 sizeof(struct msix_entry), GFP_KERNEL);
1149 if (!adapter->msix_entries) { 1137 if (!adapter->msix_entries) {
@@ -1183,7 +1171,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1183 q_vector->vsi = &adapter->vsi; 1171 q_vector->vsi = &adapter->vsi;
1184 q_vector->v_idx = q_idx; 1172 q_vector->v_idx = q_idx;
1185 netif_napi_add(adapter->netdev, &q_vector->napi, 1173 netif_napi_add(adapter->netdev, &q_vector->napi,
1186 i40evf_napi_poll, 64); 1174 i40evf_napi_poll, NAPI_POLL_WEIGHT);
1187 adapter->q_vector[q_idx] = q_vector; 1175 adapter->q_vector[q_idx] = q_vector;
1188 } 1176 }
1189 1177
@@ -1236,8 +1224,6 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
1236 pci_disable_msix(adapter->pdev); 1224 pci_disable_msix(adapter->pdev);
1237 kfree(adapter->msix_entries); 1225 kfree(adapter->msix_entries);
1238 adapter->msix_entries = NULL; 1226 adapter->msix_entries = NULL;
1239
1240 return;
1241} 1227}
1242 1228
1243/** 1229/**
@@ -1309,7 +1295,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
1309 goto restart_watchdog; 1295 goto restart_watchdog;
1310 1296
1311 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { 1297 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1312 dev_info(&adapter->pdev->dev, "Checking for redemption\n");
1313 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) { 1298 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
1314 /* A chance for redemption! */ 1299 /* A chance for redemption! */
1315 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); 1300 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1340,8 +1325,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
1340 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) { 1325 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
1341 adapter->state = __I40EVF_RESETTING; 1326 adapter->state = __I40EVF_RESETTING;
1342 adapter->flags |= I40EVF_FLAG_RESET_PENDING; 1327 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1343 dev_err(&adapter->pdev->dev, "Hardware reset detected.\n"); 1328 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
1344 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1345 schedule_work(&adapter->reset_task); 1329 schedule_work(&adapter->reset_task);
1346 adapter->aq_pending = 0; 1330 adapter->aq_pending = 0;
1347 adapter->aq_required = 0; 1331 adapter->aq_required = 0;
@@ -1413,7 +1397,7 @@ restart_watchdog:
1413} 1397}
1414 1398
1415/** 1399/**
1416 * i40evf_configure_rss - increment to next available tx queue 1400 * next_queue - increment to next available tx queue
1417 * @adapter: board private structure 1401 * @adapter: board private structure
1418 * @j: queue counter 1402 * @j: queue counter
1419 * 1403 *
@@ -1504,15 +1488,12 @@ static void i40evf_reset_task(struct work_struct *work)
1504 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { 1488 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1505 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1489 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1506 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1490 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1507 if (rstat_val != I40E_VFR_VFACTIVE) { 1491 if (rstat_val != I40E_VFR_VFACTIVE)
1508 dev_info(&adapter->pdev->dev, "Reset now occurring\n");
1509 break; 1492 break;
1510 } else { 1493 else
1511 msleep(I40EVF_RESET_WAIT_MS); 1494 msleep(I40EVF_RESET_WAIT_MS);
1512 }
1513 } 1495 }
1514 if (i == I40EVF_RESET_WAIT_COUNT) { 1496 if (i == I40EVF_RESET_WAIT_COUNT) {
1515 dev_err(&adapter->pdev->dev, "Reset was not detected\n");
1516 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1497 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1517 goto continue_reset; /* act like the reset happened */ 1498 goto continue_reset; /* act like the reset happened */
1518 } 1499 }
@@ -1521,22 +1502,24 @@ static void i40evf_reset_task(struct work_struct *work)
1521 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { 1502 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1522 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1503 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1523 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1504 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1524 if (rstat_val == I40E_VFR_VFACTIVE) { 1505 if (rstat_val == I40E_VFR_VFACTIVE)
1525 dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
1526 break; 1506 break;
1527 } else { 1507 else
1528 msleep(I40EVF_RESET_WAIT_MS); 1508 msleep(I40EVF_RESET_WAIT_MS);
1529 }
1530 } 1509 }
1531 if (i == I40EVF_RESET_WAIT_COUNT) { 1510 if (i == I40EVF_RESET_WAIT_COUNT) {
1532 /* reset never finished */ 1511 /* reset never finished */
1533 dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n", 1512 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
1534 rstat_val); 1513 rstat_val);
1535 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; 1514 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
1536 1515
1537 if (netif_running(adapter->netdev)) 1516 if (netif_running(adapter->netdev)) {
1538 i40evf_close(adapter->netdev); 1517 set_bit(__I40E_DOWN, &adapter->vsi.state);
1539 1518 i40evf_down(adapter);
1519 i40evf_free_traffic_irqs(adapter);
1520 i40evf_free_all_tx_resources(adapter);
1521 i40evf_free_all_rx_resources(adapter);
1522 }
1540 i40evf_free_misc_irq(adapter); 1523 i40evf_free_misc_irq(adapter);
1541 i40evf_reset_interrupt_capability(adapter); 1524 i40evf_reset_interrupt_capability(adapter);
1542 i40evf_free_queues(adapter); 1525 i40evf_free_queues(adapter);
@@ -1591,7 +1574,7 @@ continue_reset:
1591 } 1574 }
1592 return; 1575 return;
1593reset_err: 1576reset_err:
1594 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n"); 1577 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
1595 i40evf_close(adapter->netdev); 1578 i40evf_close(adapter->netdev);
1596} 1579}
1597 1580
@@ -1607,6 +1590,7 @@ static void i40evf_adminq_task(struct work_struct *work)
1607 struct i40e_arq_event_info event; 1590 struct i40e_arq_event_info event;
1608 struct i40e_virtchnl_msg *v_msg; 1591 struct i40e_virtchnl_msg *v_msg;
1609 i40e_status ret; 1592 i40e_status ret;
1593 u32 val, oldval;
1610 u16 pending; 1594 u16 pending;
1611 1595
1612 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) 1596 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
@@ -1614,11 +1598,9 @@ static void i40evf_adminq_task(struct work_struct *work)
1614 1598
1615 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; 1599 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
1616 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 1600 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
1617 if (!event.msg_buf) { 1601 if (!event.msg_buf)
1618 dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
1619 __func__);
1620 return; 1602 return;
1621 } 1603
1622 v_msg = (struct i40e_virtchnl_msg *)&event.desc; 1604 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
1623 do { 1605 do {
1624 ret = i40evf_clean_arq_element(hw, &event, &pending); 1606 ret = i40evf_clean_arq_element(hw, &event, &pending);
@@ -1636,6 +1618,41 @@ static void i40evf_adminq_task(struct work_struct *work)
1636 } 1618 }
1637 } while (pending); 1619 } while (pending);
1638 1620
1621 /* check for error indications */
1622 val = rd32(hw, hw->aq.arq.len);
1623 oldval = val;
1624 if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
1625 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
1626 val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
1627 }
1628 if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
1629 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
1630 val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
1631 }
1632 if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
1633 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
1634 val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
1635 }
1636 if (oldval != val)
1637 wr32(hw, hw->aq.arq.len, val);
1638
1639 val = rd32(hw, hw->aq.asq.len);
1640 oldval = val;
1641 if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
1642 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
1643 val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
1644 }
1645 if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
1646 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
1647 val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
1648 }
1649 if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
1650 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
1651 val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
1652 }
1653 if (oldval != val)
1654 wr32(hw, hw->aq.asq.len, val);
1655
1639 /* re-enable Admin queue interrupt cause */ 1656 /* re-enable Admin queue interrupt cause */
1640 i40evf_misc_irq_enable(adapter); 1657 i40evf_misc_irq_enable(adapter);
1641 1658
@@ -1673,6 +1690,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
1673 int i, err = 0; 1690 int i, err = 0;
1674 1691
1675 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { 1692 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
1693 adapter->tx_rings[i]->count = adapter->tx_desc_count;
1676 err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); 1694 err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
1677 if (!err) 1695 if (!err)
1678 continue; 1696 continue;
@@ -1700,6 +1718,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
1700 int i, err = 0; 1718 int i, err = 0;
1701 1719
1702 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { 1720 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
1721 adapter->rx_rings[i]->count = adapter->rx_desc_count;
1703 err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); 1722 err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
1704 if (!err) 1723 if (!err)
1705 continue; 1724 continue;
@@ -1804,12 +1823,11 @@ static int i40evf_close(struct net_device *netdev)
1804 if (adapter->state <= __I40EVF_DOWN) 1823 if (adapter->state <= __I40EVF_DOWN)
1805 return 0; 1824 return 0;
1806 1825
1807 /* signal that we are down to the interrupt handler */
1808 adapter->state = __I40EVF_DOWN;
1809 1826
1810 set_bit(__I40E_DOWN, &adapter->vsi.state); 1827 set_bit(__I40E_DOWN, &adapter->vsi.state);
1811 1828
1812 i40evf_down(adapter); 1829 i40evf_down(adapter);
1830 adapter->state = __I40EVF_DOWN;
1813 i40evf_free_traffic_irqs(adapter); 1831 i40evf_free_traffic_irqs(adapter);
1814 1832
1815 i40evf_free_all_tx_resources(adapter); 1833 i40evf_free_all_tx_resources(adapter);
@@ -1848,8 +1866,6 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
1848 1866
1849 WARN_ON(in_interrupt()); 1867 WARN_ON(in_interrupt());
1850 1868
1851 adapter->state = __I40EVF_RESETTING;
1852
1853 i40evf_down(adapter); 1869 i40evf_down(adapter);
1854 1870
1855 /* allocate transmit descriptors */ 1871 /* allocate transmit descriptors */
@@ -1872,7 +1888,7 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
1872 return; 1888 return;
1873 1889
1874err_reinit: 1890err_reinit:
1875 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n"); 1891 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
1876 i40evf_close(netdev); 1892 i40evf_close(netdev);
1877} 1893}
1878 1894
@@ -1967,7 +1983,7 @@ static void i40evf_init_task(struct work_struct *work)
1967 } 1983 }
1968 err = i40evf_check_reset_complete(hw); 1984 err = i40evf_check_reset_complete(hw);
1969 if (err) { 1985 if (err) {
1970 dev_err(&pdev->dev, "Device is still in reset (%d)\n", 1986 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
1971 err); 1987 err);
1972 goto err; 1988 goto err;
1973 } 1989 }
@@ -1993,14 +2009,14 @@ static void i40evf_init_task(struct work_struct *work)
1993 break; 2009 break;
1994 case __I40EVF_INIT_VERSION_CHECK: 2010 case __I40EVF_INIT_VERSION_CHECK:
1995 if (!i40evf_asq_done(hw)) { 2011 if (!i40evf_asq_done(hw)) {
1996 dev_err(&pdev->dev, "Admin queue command never completed.\n"); 2012 dev_err(&pdev->dev, "Admin queue command never completed\n");
1997 goto err; 2013 goto err;
1998 } 2014 }
1999 2015
2000 /* aq msg sent, awaiting reply */ 2016 /* aq msg sent, awaiting reply */
2001 err = i40evf_verify_api_ver(adapter); 2017 err = i40evf_verify_api_ver(adapter);
2002 if (err) { 2018 if (err) {
2003 dev_err(&pdev->dev, "Unable to verify API version (%d)\n", 2019 dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
2004 err); 2020 err);
2005 goto err; 2021 goto err;
2006 } 2022 }
@@ -2074,12 +2090,12 @@ static void i40evf_init_task(struct work_struct *work)
2074 netdev->hw_features &= ~NETIF_F_RXCSUM; 2090 netdev->hw_features &= ~NETIF_F_RXCSUM;
2075 2091
2076 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 2092 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2077 dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n", 2093 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2078 adapter->hw.mac.addr); 2094 adapter->hw.mac.addr);
2079 random_ether_addr(adapter->hw.mac.addr); 2095 random_ether_addr(adapter->hw.mac.addr);
2080 } 2096 }
2081 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 2097 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2082 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 2098 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2083 2099
2084 INIT_LIST_HEAD(&adapter->mac_filter_list); 2100 INIT_LIST_HEAD(&adapter->mac_filter_list);
2085 INIT_LIST_HEAD(&adapter->vlan_filter_list); 2101 INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2087,7 +2103,7 @@ static void i40evf_init_task(struct work_struct *work)
2087 if (NULL == f) 2103 if (NULL == f)
2088 goto err_sw_init; 2104 goto err_sw_init;
2089 2105
2090 memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN); 2106 ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
2091 f->add = true; 2107 f->add = true;
2092 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; 2108 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
2093 2109
@@ -2098,6 +2114,8 @@ static void i40evf_init_task(struct work_struct *work)
2098 adapter->watchdog_timer.data = (unsigned long)adapter; 2114 adapter->watchdog_timer.data = (unsigned long)adapter;
2099 mod_timer(&adapter->watchdog_timer, jiffies + 1); 2115 mod_timer(&adapter->watchdog_timer, jiffies + 1);
2100 2116
2117 adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
2118 adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
2101 err = i40evf_init_interrupt_scheme(adapter); 2119 err = i40evf_init_interrupt_scheme(adapter);
2102 if (err) 2120 if (err)
2103 goto err_sw_init; 2121 goto err_sw_init;
@@ -2114,8 +2132,10 @@ static void i40evf_init_task(struct work_struct *work)
2114 adapter->vsi.back = adapter; 2132 adapter->vsi.back = adapter;
2115 adapter->vsi.base_vector = 1; 2133 adapter->vsi.base_vector = 1;
2116 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; 2134 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
2117 adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC; 2135 adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
2118 adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC; 2136 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
2137 adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
2138 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
2119 adapter->vsi.netdev = adapter->netdev; 2139 adapter->vsi.netdev = adapter->netdev;
2120 2140
2121 if (!adapter->netdev_registered) { 2141 if (!adapter->netdev_registered) {
@@ -2128,7 +2148,7 @@ static void i40evf_init_task(struct work_struct *work)
2128 2148
2129 netif_tx_stop_all_queues(netdev); 2149 netif_tx_stop_all_queues(netdev);
2130 2150
2131 dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr); 2151 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2132 if (netdev->features & NETIF_F_GRO) 2152 if (netdev->features & NETIF_F_GRO)
2133 dev_info(&pdev->dev, "GRO is enabled\n"); 2153 dev_info(&pdev->dev, "GRO is enabled\n");
2134 2154
@@ -2152,12 +2172,11 @@ err_alloc:
2152err: 2172err:
2153 /* Things went into the weeds, so try again later */ 2173 /* Things went into the weeds, so try again later */
2154 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { 2174 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
2155 dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n"); 2175 dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
2156 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; 2176 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
2157 return; /* do not reschedule */ 2177 return; /* do not reschedule */
2158 } 2178 }
2159 schedule_delayed_work(&adapter->init_task, HZ * 3); 2179 schedule_delayed_work(&adapter->init_task, HZ * 3);
2160 return;
2161} 2180}
2162 2181
2163/** 2182/**
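The largest functional addition in the i40evf_main.c hunks above is the new admin-queue error handling: after draining the ARQ, the task reads the ARQ/ASQ length registers, logs any latched VF/overflow/critical error bits, clears them, and writes the register back only if something actually changed. A minimal standalone sketch of that read-log-clear pattern follows; the bit positions, the fake register, and the stub rd32()/wr32() helpers are illustrative stand-ins, not the driver's real MMIO accessors.

    #include <stdio.h>
    #include <stdint.h>

    #define ARQLEN_VFE   (1u << 28)  /* VF error        (illustrative bits) */
    #define ARQLEN_OVFL  (1u << 29)  /* queue overflow                      */
    #define ARQLEN_CRIT  (1u << 30)  /* critical error                      */

    /* Pretend the hardware latched an overflow alongside a length of 0x40. */
    static uint32_t fake_arqlen = ARQLEN_OVFL | 0x40;

    static uint32_t rd32(void)        { return fake_arqlen; }
    static void     wr32(uint32_t v)  { fake_arqlen = v; }

    int main(void)
    {
        uint32_t val = rd32(), oldval = val;

        if (val & ARQLEN_VFE) {
            printf("ARQ VF Error detected\n");
            val &= ~ARQLEN_VFE;
        }
        if (val & ARQLEN_OVFL) {
            printf("ARQ Overflow Error detected\n");
            val &= ~ARQLEN_OVFL;
        }
        if (val & ARQLEN_CRIT) {
            printf("ARQ Critical Error detected\n");
            val &= ~ARQLEN_CRIT;
        }
        /* Write back only when a bit was cleared, avoiding a needless
         * MMIO write on the common no-error path. */
        if (oldval != val)
            wr32(val);
        return 0;
    }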
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e294f012647d..2dc0bac76717 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -216,11 +219,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
216 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + 219 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
217 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); 220 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
218 vqci = kzalloc(len, GFP_ATOMIC); 221 vqci = kzalloc(len, GFP_ATOMIC);
219 if (!vqci) { 222 if (!vqci)
220 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
221 __func__);
222 return; 223 return;
223 } 224
224 vqci->vsi_id = adapter->vsi_res->vsi_id; 225 vqci->vsi_id = adapter->vsi_res->vsi_id;
225 vqci->num_queue_pairs = pairs; 226 vqci->num_queue_pairs = pairs;
226 vqpi = vqci->qpair; 227 vqpi = vqci->qpair;
@@ -232,6 +233,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
232 vqpi->txq.queue_id = i; 233 vqpi->txq.queue_id = i;
233 vqpi->txq.ring_len = adapter->tx_rings[i]->count; 234 vqpi->txq.ring_len = adapter->tx_rings[i]->count;
234 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; 235 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
236 vqpi->txq.headwb_enabled = 1;
237 vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
238 (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
235 239
236 vqpi->rxq.vsi_id = vqci->vsi_id; 240 vqpi->rxq.vsi_id = vqci->vsi_id;
237 vqpi->rxq.queue_id = i; 241 vqpi->rxq.queue_id = i;
@@ -329,11 +333,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
329 (adapter->num_msix_vectors * 333 (adapter->num_msix_vectors *
330 sizeof(struct i40e_virtchnl_vector_map)); 334 sizeof(struct i40e_virtchnl_vector_map));
331 vimi = kzalloc(len, GFP_ATOMIC); 335 vimi = kzalloc(len, GFP_ATOMIC);
332 if (!vimi) { 336 if (!vimi)
333 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
334 __func__);
335 return; 337 return;
336 }
337 338
338 vimi->num_vectors = adapter->num_msix_vectors; 339 vimi->num_vectors = adapter->num_msix_vectors;
339 /* Queue vectors first */ 340 /* Queue vectors first */
@@ -390,7 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
390 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 391 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
391 (count * sizeof(struct i40e_virtchnl_ether_addr)); 392 (count * sizeof(struct i40e_virtchnl_ether_addr));
392 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 393 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
393 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", 394 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
394 __func__); 395 __func__);
395 count = (I40EVF_MAX_AQ_BUF_SIZE - 396 count = (I40EVF_MAX_AQ_BUF_SIZE -
396 sizeof(struct i40e_virtchnl_ether_addr_list)) / 397 sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -399,16 +400,14 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
399 } 400 }
400 401
401 veal = kzalloc(len, GFP_ATOMIC); 402 veal = kzalloc(len, GFP_ATOMIC);
402 if (!veal) { 403 if (!veal)
403 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
404 __func__);
405 return; 404 return;
406 } 405
407 veal->vsi_id = adapter->vsi_res->vsi_id; 406 veal->vsi_id = adapter->vsi_res->vsi_id;
408 veal->num_elements = count; 407 veal->num_elements = count;
409 list_for_each_entry(f, &adapter->mac_filter_list, list) { 408 list_for_each_entry(f, &adapter->mac_filter_list, list) {
410 if (f->add) { 409 if (f->add) {
411 memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); 410 ether_addr_copy(veal->list[i].addr, f->macaddr);
412 i++; 411 i++;
413 f->add = false; 412 f->add = false;
414 } 413 }
@@ -454,7 +453,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
454 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 453 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
455 (count * sizeof(struct i40e_virtchnl_ether_addr)); 454 (count * sizeof(struct i40e_virtchnl_ether_addr));
456 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 455 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
457 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", 456 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
458 __func__); 457 __func__);
459 count = (I40EVF_MAX_AQ_BUF_SIZE - 458 count = (I40EVF_MAX_AQ_BUF_SIZE -
460 sizeof(struct i40e_virtchnl_ether_addr_list)) / 459 sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -462,16 +461,14 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
462 len = I40EVF_MAX_AQ_BUF_SIZE; 461 len = I40EVF_MAX_AQ_BUF_SIZE;
463 } 462 }
464 veal = kzalloc(len, GFP_ATOMIC); 463 veal = kzalloc(len, GFP_ATOMIC);
465 if (!veal) { 464 if (!veal)
466 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
467 __func__);
468 return; 465 return;
469 } 466
470 veal->vsi_id = adapter->vsi_res->vsi_id; 467 veal->vsi_id = adapter->vsi_res->vsi_id;
471 veal->num_elements = count; 468 veal->num_elements = count;
472 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 469 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
473 if (f->remove) { 470 if (f->remove) {
474 memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); 471 ether_addr_copy(veal->list[i].addr, f->macaddr);
475 i++; 472 i++;
476 list_del(&f->list); 473 list_del(&f->list);
477 kfree(f); 474 kfree(f);
@@ -518,7 +515,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
518 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 515 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
519 (count * sizeof(u16)); 516 (count * sizeof(u16));
520 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 517 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
521 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", 518 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
522 __func__); 519 __func__);
523 count = (I40EVF_MAX_AQ_BUF_SIZE - 520 count = (I40EVF_MAX_AQ_BUF_SIZE -
524 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 521 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -526,11 +523,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
526 len = I40EVF_MAX_AQ_BUF_SIZE; 523 len = I40EVF_MAX_AQ_BUF_SIZE;
527 } 524 }
528 vvfl = kzalloc(len, GFP_ATOMIC); 525 vvfl = kzalloc(len, GFP_ATOMIC);
529 if (!vvfl) { 526 if (!vvfl)
530 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
531 __func__);
532 return; 527 return;
533 } 528
534 vvfl->vsi_id = adapter->vsi_res->vsi_id; 529 vvfl->vsi_id = adapter->vsi_res->vsi_id;
535 vvfl->num_elements = count; 530 vvfl->num_elements = count;
536 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 531 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
@@ -580,7 +575,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
580 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 575 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
581 (count * sizeof(u16)); 576 (count * sizeof(u16));
582 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 577 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
583 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", 578 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
584 __func__); 579 __func__);
585 count = (I40EVF_MAX_AQ_BUF_SIZE - 580 count = (I40EVF_MAX_AQ_BUF_SIZE -
586 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 581 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -588,11 +583,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
588 len = I40EVF_MAX_AQ_BUF_SIZE; 583 len = I40EVF_MAX_AQ_BUF_SIZE;
589 } 584 }
590 vvfl = kzalloc(len, GFP_ATOMIC); 585 vvfl = kzalloc(len, GFP_ATOMIC);
591 if (!vvfl) { 586 if (!vvfl)
592 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
593 __func__);
594 return; 587 return;
595 } 588
596 vvfl->vsi_id = adapter->vsi_res->vsi_id; 589 vvfl->vsi_id = adapter->vsi_res->vsi_id;
597 vvfl->num_elements = count; 590 vvfl->num_elements = count;
598 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 591 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
@@ -721,7 +714,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
721 return; 714 return;
722 } 715 }
723 if (v_opcode != adapter->current_op) { 716 if (v_opcode != adapter->current_op) {
724 dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n", 717 dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
725 __func__, adapter->current_op, v_opcode); 718 __func__, adapter->current_op, v_opcode);
726 /* We're probably completely screwed at this point, but clear 719 /* We're probably completely screwed at this point, but clear
727 * the current op and try to carry on.... 720 * the current op and try to carry on....
@@ -730,7 +723,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
730 return; 723 return;
731 } 724 }
732 if (v_retval) { 725 if (v_retval) {
733 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n", 726 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
734 __func__, v_retval, v_opcode); 727 __func__, v_retval, v_opcode);
735 } 728 }
736 switch (v_opcode) { 729 switch (v_opcode) {
@@ -745,9 +738,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
745 stats->tx_broadcast; 738 stats->tx_broadcast;
746 adapter->net_stats.rx_bytes = stats->rx_bytes; 739 adapter->net_stats.rx_bytes = stats->rx_bytes;
747 adapter->net_stats.tx_bytes = stats->tx_bytes; 740 adapter->net_stats.tx_bytes = stats->tx_bytes;
748 adapter->net_stats.rx_errors = stats->rx_errors;
749 adapter->net_stats.tx_errors = stats->tx_errors; 741 adapter->net_stats.tx_errors = stats->tx_errors;
750 adapter->net_stats.rx_dropped = stats->rx_missed; 742 adapter->net_stats.rx_dropped = stats->rx_discards;
751 adapter->net_stats.tx_dropped = stats->tx_discards; 743 adapter->net_stats.tx_dropped = stats->tx_discards;
752 adapter->current_stats = *stats; 744 adapter->current_stats = *stats;
753 } 745 }
@@ -781,7 +773,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
781 adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); 773 adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
782 break; 774 break;
783 default: 775 default:
784 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n", 776 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
785 __func__, v_opcode); 777 __func__, v_opcode);
786 break; 778 break;
787 } /* switch v_opcode */ 779 } /* switch v_opcode */
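The i40evf_virtchnl.c hunk that sets vqpi->txq.headwb_enabled also tells the PF where head writeback should land: at the first byte past the descriptor ring, i.e. dma_ring_addr + ring_len * sizeof(tx desc). Below is a standalone sketch of that address computation, assuming the 16-byte i40e transmit descriptor layout (two u64 words) and a made-up DMA address.

    #include <stdio.h>
    #include <stdint.h>

    /* 16-byte descriptor, mirroring the i40e tx descriptor's two u64s. */
    struct tx_desc {
        uint64_t buf_addr;
        uint64_t cmd_type_offset_bsz;
    };

    int main(void)
    {
        uint64_t dma_ring_addr = 0x100000;   /* illustrative DMA address */
        uint16_t ring_len = 512;             /* descriptors in the ring  */

        /* Head writeback lands at the first byte past the ring. */
        uint64_t dma_headwb_addr =
            dma_ring_addr + (uint64_t)ring_len * sizeof(struct tx_desc);

        printf("ring: %#llx..%#llx, head writeback at %#llx\n",
               (unsigned long long)dma_ring_addr,
               (unsigned long long)(dma_headwb_addr - 1),
               (unsigned long long)dma_headwb_addr);
        return 0;
    }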
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12e775..a2db388cc31e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27/* e1000_82575 24/* e1000_82575
28 * e1000_82576 25 * e1000_82576
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
73static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); 70static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
74static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); 71static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
75static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); 72static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
76static const u16 e1000_82580_rxpbs_table[] = 73static const u16 e1000_82580_rxpbs_table[] = {
77 { 36, 72, 144, 1, 2, 4, 8, 16, 74 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
78 35, 70, 140 };
79 75
80/** 76/**
81 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 77 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -159,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
159 ret_val = igb_check_for_link_82575(hw); 155 ret_val = igb_check_for_link_82575(hw);
160 } 156 }
161 157
162 return E1000_SUCCESS; 158 return 0;
163} 159}
164 160
165/** 161/**
@@ -526,7 +522,7 @@ out:
526static s32 igb_get_invariants_82575(struct e1000_hw *hw) 522static s32 igb_get_invariants_82575(struct e1000_hw *hw)
527{ 523{
528 struct e1000_mac_info *mac = &hw->mac; 524 struct e1000_mac_info *mac = &hw->mac;
529 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; 525 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
530 s32 ret_val; 526 s32 ret_val;
531 u32 ctrl_ext = 0; 527 u32 ctrl_ext = 0;
532 u32 link_mode = 0; 528 u32 link_mode = 0;
@@ -1008,7 +1004,6 @@ out:
1008static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) 1004static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1009{ 1005{
1010 struct e1000_phy_info *phy = &hw->phy; 1006 struct e1000_phy_info *phy = &hw->phy;
1011 s32 ret_val = 0;
1012 u16 data; 1007 u16 data;
1013 1008
1014 data = rd32(E1000_82580_PHY_POWER_MGMT); 1009 data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1032,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1032 data &= ~E1000_82580_PM_SPD; } 1027 data &= ~E1000_82580_PM_SPD; }
1033 1028
1034 wr32(E1000_82580_PHY_POWER_MGMT, data); 1029 wr32(E1000_82580_PHY_POWER_MGMT, data);
1035 return ret_val; 1030 return 0;
1036} 1031}
1037 1032
1038/** 1033/**
@@ -1052,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1052static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) 1047static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1053{ 1048{
1054 struct e1000_phy_info *phy = &hw->phy; 1049 struct e1000_phy_info *phy = &hw->phy;
1055 s32 ret_val = 0;
1056 u16 data; 1050 u16 data;
1057 1051
1058 data = rd32(E1000_82580_PHY_POWER_MGMT); 1052 data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1077,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1077 } 1071 }
1078 1072
1079 wr32(E1000_82580_PHY_POWER_MGMT, data); 1073 wr32(E1000_82580_PHY_POWER_MGMT, data);
1080 return ret_val; 1074 return 0;
1081} 1075}
1082 1076
1083/** 1077/**
@@ -1180,8 +1174,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1180{ 1174{
1181 u32 swfw_sync; 1175 u32 swfw_sync;
1182 1176
1183 while (igb_get_hw_semaphore(hw) != 0); 1177 while (igb_get_hw_semaphore(hw) != 0)
1184 /* Empty */ 1178 ; /* Empty */
1185 1179
1186 swfw_sync = rd32(E1000_SW_FW_SYNC); 1180 swfw_sync = rd32(E1000_SW_FW_SYNC);
1187 swfw_sync &= ~mask; 1181 swfw_sync &= ~mask;
@@ -1203,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1203static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) 1197static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1204{ 1198{
1205 s32 timeout = PHY_CFG_TIMEOUT; 1199 s32 timeout = PHY_CFG_TIMEOUT;
1206 s32 ret_val = 0;
1207 u32 mask = E1000_NVM_CFG_DONE_PORT_0; 1200 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1208 1201
1209 if (hw->bus.func == 1) 1202 if (hw->bus.func == 1)
@@ -1216,7 +1209,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1216 while (timeout) { 1209 while (timeout) {
1217 if (rd32(E1000_EEMNGCTL) & mask) 1210 if (rd32(E1000_EEMNGCTL) & mask)
1218 break; 1211 break;
1219 msleep(1); 1212 usleep_range(1000, 2000);
1220 timeout--; 1213 timeout--;
1221 } 1214 }
1222 if (!timeout) 1215 if (!timeout)
@@ -1227,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1227 (hw->phy.type == e1000_phy_igp_3)) 1220 (hw->phy.type == e1000_phy_igp_3))
1228 igb_phy_init_script_igp3(hw); 1221 igb_phy_init_script_igp3(hw);
1229 1222
1230 return ret_val; 1223 return 0;
1231} 1224}
1232 1225
1233/** 1226/**
@@ -1269,7 +1262,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1269 1262
1270 if (hw->phy.media_type != e1000_media_type_copper) { 1263 if (hw->phy.media_type != e1000_media_type_copper) {
1271 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 1264 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1272 &duplex); 1265 &duplex);
1273 /* Use this flag to determine if link needs to be checked or 1266 /* Use this flag to determine if link needs to be checked or
1274 * not. If we have link clear the flag so that we do not 1267 * not. If we have link clear the flag so that we do not
1275 * continue to check for link. 1268 * continue to check for link.
@@ -1316,7 +1309,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1316 1309
1317 /* flush the write to verify completion */ 1310 /* flush the write to verify completion */
1318 wrfl(); 1311 wrfl();
1319 msleep(1); 1312 usleep_range(1000, 2000);
1320} 1313}
1321 1314
1322/** 1315/**
@@ -1411,7 +1404,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1411 1404
1412 /* flush the write to verify completion */ 1405 /* flush the write to verify completion */
1413 wrfl(); 1406 wrfl();
1414 msleep(1); 1407 usleep_range(1000, 2000);
1415 } 1408 }
1416} 1409}
1417 1410
@@ -1436,9 +1429,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1436 1429
1437 /* set the completion timeout for interface */ 1430 /* set the completion timeout for interface */
1438 ret_val = igb_set_pcie_completion_timeout(hw); 1431 ret_val = igb_set_pcie_completion_timeout(hw);
1439 if (ret_val) { 1432 if (ret_val)
1440 hw_dbg("PCI-E Set completion timeout has failed.\n"); 1433 hw_dbg("PCI-E Set completion timeout has failed.\n");
1441 }
1442 1434
1443 hw_dbg("Masking off all interrupts\n"); 1435 hw_dbg("Masking off all interrupts\n");
1444 wr32(E1000_IMC, 0xffffffff); 1436 wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1439,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1447 wr32(E1000_TCTL, E1000_TCTL_PSP); 1439 wr32(E1000_TCTL, E1000_TCTL_PSP);
1448 wrfl(); 1440 wrfl();
1449 1441
1450 msleep(10); 1442 usleep_range(10000, 20000);
1451 1443
1452 ctrl = rd32(E1000_CTRL); 1444 ctrl = rd32(E1000_CTRL);
1453 1445
@@ -1622,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1622{ 1614{
1623 u32 ctrl_ext, ctrl_reg, reg, anadv_reg; 1615 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1624 bool pcs_autoneg; 1616 bool pcs_autoneg;
1625 s32 ret_val = E1000_SUCCESS; 1617 s32 ret_val = 0;
1626 u16 data; 1618 u16 data;
1627 1619
1628 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1620 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
@@ -1676,7 +1668,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1676 hw->mac.type == e1000_82576) { 1668 hw->mac.type == e1000_82576) {
1677 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1669 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1678 if (ret_val) { 1670 if (ret_val) {
1679 printk(KERN_DEBUG "NVM Read Error\n\n"); 1671 hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
1680 return ret_val; 1672 return ret_val;
1681 } 1673 }
1682 1674
@@ -1689,7 +1681,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1689 * link either autoneg or be forced to 1000/Full 1681 * link either autoneg or be forced to 1000/Full
1690 */ 1682 */
1691 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1683 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1692 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1684 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1693 1685
1694 /* set speed of 1000/Full if speed/duplex is forced */ 1686 /* set speed of 1000/Full if speed/duplex is forced */
1695 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1687 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1917,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1925 } 1917 }
1926 /* Poll all queues to verify they have shut down */ 1918 /* Poll all queues to verify they have shut down */
1927 for (ms_wait = 0; ms_wait < 10; ms_wait++) { 1919 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1928 msleep(1); 1920 usleep_range(1000, 2000);
1929 rx_enabled = 0; 1921 rx_enabled = 0;
1930 for (i = 0; i < 4; i++) 1922 for (i = 0; i < 4; i++)
1931 rx_enabled |= rd32(E1000_RXDCTL(i)); 1923 rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1945,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1953 wr32(E1000_RCTL, temp_rctl); 1945 wr32(E1000_RCTL, temp_rctl);
1954 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); 1946 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1955 wrfl(); 1947 wrfl();
1956 msleep(2); 1948 usleep_range(2000, 3000);
1957 1949
1958 /* Enable RX queues that were previously enabled and restore our 1950 /* Enable RX queues that were previously enabled and restore our
1959 * previous state 1951 * previous state
@@ -2005,14 +1997,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
2005 * 16ms to 55ms 1997 * 16ms to 55ms
2006 */ 1998 */
2007 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 1999 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2008 &pcie_devctl2); 2000 &pcie_devctl2);
2009 if (ret_val) 2001 if (ret_val)
2010 goto out; 2002 goto out;
2011 2003
2012 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; 2004 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
2013 2005
2014 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 2006 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2015 &pcie_devctl2); 2007 &pcie_devctl2);
2016out: 2008out:
2017 /* disable completion timeout resend */ 2009 /* disable completion timeout resend */
2018 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; 2010 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2233,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2241 wr32(E1000_TCTL, E1000_TCTL_PSP); 2233 wr32(E1000_TCTL, E1000_TCTL_PSP);
2242 wrfl(); 2234 wrfl();
2243 2235
2244 msleep(10); 2236 usleep_range(10000, 11000);
2245 2237
2246 /* Determine whether or not a global dev reset is requested */ 2238 /* Determine whether or not a global dev reset is requested */
2247 if (global_device_reset && 2239 if (global_device_reset &&
@@ -2259,7 +2251,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2259 2251
2260 /* Add delay to insure DEV_RST has time to complete */ 2252 /* Add delay to insure DEV_RST has time to complete */
2261 if (global_device_reset) 2253 if (global_device_reset)
2262 msleep(5); 2254 usleep_range(5000, 6000);
2263 2255
2264 ret_val = igb_get_auto_rd_done(hw); 2256 ret_val = igb_get_auto_rd_done(hw);
2265 if (ret_val) { 2257 if (ret_val) {
@@ -2436,8 +2428,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2436 2428
2437 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2429 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2438 if (ret_val) { 2430 if (ret_val) {
2439 hw_dbg("NVM Read Error while updating checksum" 2431 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
2440 " compatibility bit.\n");
2441 goto out; 2432 goto out;
2442 } 2433 }
2443 2434
@@ -2447,8 +2438,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2447 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2438 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2448 &nvm_data); 2439 &nvm_data);
2449 if (ret_val) { 2440 if (ret_val) {
2450 hw_dbg("NVM Write Error while updating checksum" 2441 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
2451 " compatibility bit.\n");
2452 goto out; 2442 goto out;
2453 } 2443 }
2454 } 2444 }
@@ -2525,7 +2515,7 @@ out:
2525static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2515static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2526 u16 *data, bool read) 2516 u16 *data, bool read)
2527{ 2517{
2528 s32 ret_val = E1000_SUCCESS; 2518 s32 ret_val = 0;
2529 2519
2530 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2520 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2531 if (ret_val) 2521 if (ret_val)
@@ -2559,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2559 **/ 2549 **/
2560s32 igb_set_eee_i350(struct e1000_hw *hw) 2550s32 igb_set_eee_i350(struct e1000_hw *hw)
2561{ 2551{
2562 s32 ret_val = 0;
2563 u32 ipcnfg, eeer; 2552 u32 ipcnfg, eeer;
2564 2553
2565 if ((hw->mac.type < e1000_i350) || 2554 if ((hw->mac.type < e1000_i350) ||
@@ -2593,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2593 rd32(E1000_EEER); 2582 rd32(E1000_EEER);
2594out: 2583out:
2595 2584
2596 return ret_val; 2585 return 0;
2597} 2586}
2598 2587
2599/** 2588/**
@@ -2720,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = {
2720 **/ 2709 **/
2721static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2710static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2722{ 2711{
2723 s32 status = E1000_SUCCESS;
2724 u16 ets_offset; 2712 u16 ets_offset;
2725 u16 ets_cfg; 2713 u16 ets_cfg;
2726 u16 ets_sensor; 2714 u16 ets_sensor;
@@ -2738,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2738 /* Return the internal sensor only if ETS is unsupported */ 2726 /* Return the internal sensor only if ETS is unsupported */
2739 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2727 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2740 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2728 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2741 return status; 2729 return 0;
2742 2730
2743 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2731 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2744 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2732 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2762,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2762 E1000_I2C_THERMAL_SENSOR_ADDR, 2750 E1000_I2C_THERMAL_SENSOR_ADDR,
2763 &data->sensor[i].temp); 2751 &data->sensor[i].temp);
2764 } 2752 }
2765 return status; 2753 return 0;
2766} 2754}
2767 2755
2768/** 2756/**
@@ -2774,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2774 **/ 2762 **/
2775static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2763static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2776{ 2764{
2777 s32 status = E1000_SUCCESS;
2778 u16 ets_offset; 2765 u16 ets_offset;
2779 u16 ets_cfg; 2766 u16 ets_cfg;
2780 u16 ets_sensor; 2767 u16 ets_sensor;
@@ -2800,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2800 /* Return the internal sensor only if ETS is unsupported */ 2787 /* Return the internal sensor only if ETS is unsupported */
2801 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2788 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2802 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2789 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2803 return status; 2790 return 0;
2804 2791
2805 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2792 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2806 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2793 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2831,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2831 low_thresh_delta; 2818 low_thresh_delta;
2832 } 2819 }
2833 } 2820 }
2834 return status; 2821 return 0;
2835} 2822}
2836 2823
2837#endif 2824#endif
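Several e1000_82575.c hunks above swap msleep(1) and similar short delays for usleep_range(): for waits well under 20 ms, msleep() is rounded up to whole jiffies and can oversleep badly, while usleep_range() hands the timer subsystem an explicit window it may use to coalesce wakeups. Below is a userspace analogue of the driver's bounded poll loops, with check_done() standing in for the register reads and sleep_range_us() standing in for usleep_range().

    #include <stdio.h>
    #include <stdbool.h>
    #include <time.h>

    static int attempts;
    static bool check_done(void)
    {
        return ++attempts >= 3;    /* pretend hardware finishes on poll 3 */
    }

    static void sleep_range_us(long min_us, long max_us)
    {
        /* Kernel code calls usleep_range(min, max) so the timer core may
         * place the wakeup anywhere in [min, max]; userspace just sleeps
         * the minimum. */
        struct timespec ts = { 0, min_us * 1000L };
        (void)max_us;
        nanosleep(&ts, NULL);
    }

    int main(void)
    {
        int timeout = 10;          /* same style of bound as ms_wait */

        while (timeout--) {
            if (check_done())
                break;
            sleep_range_us(1000, 2000);
        }
        if (timeout < 0)
            printf("timed out\n");
        else
            printf("done after %d polls\n", attempts);
        return 0;
    }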
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be72416..b407c55738fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_82575_H_ 24#ifndef _E1000_82575_H_
28#define _E1000_82575_H_ 25#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
37 u8 data); 34 u8 data);
38 35
39#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ 36#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
40 (ID_LED_DEF1_DEF2 << 8) | \ 37 (ID_LED_DEF1_DEF2 << 8) | \
41 (ID_LED_DEF1_DEF2 << 4) | \ 38 (ID_LED_DEF1_DEF2 << 4) | \
42 (ID_LED_OFF1_ON2)) 39 (ID_LED_OFF1_ON2))
43 40
44#define E1000_RAR_ENTRIES_82575 16 41#define E1000_RAR_ENTRIES_82575 16
45#define E1000_RAR_ENTRIES_82576 24 42#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
67#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 64#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
68 65
69#define E1000_EICR_TX_QUEUE ( \ 66#define E1000_EICR_TX_QUEUE ( \
70 E1000_EICR_TX_QUEUE0 | \ 67 E1000_EICR_TX_QUEUE0 | \
71 E1000_EICR_TX_QUEUE1 | \ 68 E1000_EICR_TX_QUEUE1 | \
72 E1000_EICR_TX_QUEUE2 | \ 69 E1000_EICR_TX_QUEUE2 | \
73 E1000_EICR_TX_QUEUE3) 70 E1000_EICR_TX_QUEUE3)
74 71
75#define E1000_EICR_RX_QUEUE ( \ 72#define E1000_EICR_RX_QUEUE ( \
76 E1000_EICR_RX_QUEUE0 | \ 73 E1000_EICR_RX_QUEUE0 | \
77 E1000_EICR_RX_QUEUE1 | \ 74 E1000_EICR_RX_QUEUE1 | \
78 E1000_EICR_RX_QUEUE2 | \ 75 E1000_EICR_RX_QUEUE2 | \
79 E1000_EICR_RX_QUEUE3) 76 E1000_EICR_RX_QUEUE3)
80 77
81/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 78/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
82#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ 79#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
92 struct { 89 struct {
93 struct { 90 struct {
94 __le16 pkt_info; /* RSS type, Packet type */ 91 __le16 pkt_info; /* RSS type, Packet type */
95 __le16 hdr_info; /* Split Header, 92 __le16 hdr_info; /* Split Head, buf len */
96 * header buffer length */
97 } lo_dword; 93 } lo_dword;
98 union { 94 union {
99 __le32 rss; /* RSS Hash */ 95 __le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf925ac72..2a8bb35c2df2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_DEFINES_H_ 24#ifndef _E1000_DEFINES_H_
28#define _E1000_DEFINES_H_ 25#define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
101 98
102/* Same mask, but for extended and packet split descriptors */ 99/* Same mask, but for extended and packet split descriptors */
103#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 100#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
104 E1000_RXDEXT_STATERR_CE | \ 101 E1000_RXDEXT_STATERR_CE | \
105 E1000_RXDEXT_STATERR_SE | \ 102 E1000_RXDEXT_STATERR_SE | \
106 E1000_RXDEXT_STATERR_SEQ | \ 103 E1000_RXDEXT_STATERR_SEQ | \
107 E1000_RXDEXT_STATERR_CXE | \ 104 E1000_RXDEXT_STATERR_CXE | \
108 E1000_RXDEXT_STATERR_RXE) 105 E1000_RXDEXT_STATERR_RXE)
109 106
110#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 107#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
111#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 108#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,39 +304,34 @@
307#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 304#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
308 305
309/* DMA Coalescing register fields */ 306/* DMA Coalescing register fields */
310#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing 307#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
311 * Watchdog Timer */ 308#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
312#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
313 * Threshold */
314#define E1000_DMACR_DMACTHR_SHIFT 16 309#define E1000_DMACR_DMACTHR_SHIFT 16
315#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe 310#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
316 * transactions */
317#define E1000_DMACR_DMAC_LX_SHIFT 28 311#define E1000_DMACR_DMAC_LX_SHIFT 28
318#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ 312#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
319/* DMA Coalescing BMC-to-OS Watchdog Enable */ 313/* DMA Coalescing BMC-to-OS Watchdog Enable */
320#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 314#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
321 315
322#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit 316#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
323 * Threshold */
324 317
325#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ 318#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
326 319
327#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate 320#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
328 * Threshold */ 321#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
329#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
330 * current window */
331 322
332#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic 323#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
333 * Current Cnt */
334 324
335#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold 325#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
336 * High val */
337#define E1000_FCRTC_RTH_COAL_SHIFT 4 326#define E1000_FCRTC_RTH_COAL_SHIFT 4
338#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ 327#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
339 328
340/* Timestamp in Rx buffer */ 329/* Timestamp in Rx buffer */
341#define E1000_RXPBS_CFG_TS_EN 0x80000000 330#define E1000_RXPBS_CFG_TS_EN 0x80000000
342 331
332#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
333#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
334
343/* SerDes Control */ 335/* SerDes Control */
344#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 336#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
345 337
@@ -406,12 +398,12 @@
406 * o LSC = Link Status Change 398 * o LSC = Link Status Change
407 */ 399 */
408#define IMS_ENABLE_MASK ( \ 400#define IMS_ENABLE_MASK ( \
409 E1000_IMS_RXT0 | \ 401 E1000_IMS_RXT0 | \
410 E1000_IMS_TXDW | \ 402 E1000_IMS_TXDW | \
411 E1000_IMS_RXDMT0 | \ 403 E1000_IMS_RXDMT0 | \
412 E1000_IMS_RXSEQ | \ 404 E1000_IMS_RXSEQ | \
413 E1000_IMS_LSC | \ 405 E1000_IMS_LSC | \
414 E1000_IMS_DOUTSYNC) 406 E1000_IMS_DOUTSYNC)
415 407
416/* Interrupt Mask Set */ 408/* Interrupt Mask Set */
417#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 409#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -467,7 +459,6 @@
467#define E1000_RAH_POOL_1 0x00040000 459#define E1000_RAH_POOL_1 0x00040000
468 460
469/* Error Codes */ 461/* Error Codes */
470#define E1000_SUCCESS 0
471#define E1000_ERR_NVM 1 462#define E1000_ERR_NVM 1
472#define E1000_ERR_PHY 2 463#define E1000_ERR_PHY 2
473#define E1000_ERR_CONFIG 3 464#define E1000_ERR_CONFIG 3
@@ -1011,8 +1002,7 @@
1011#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 1002#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
1012 1003
1013/* DMA Coalescing register fields */ 1004/* DMA Coalescing register fields */
1014#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based 1005#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
1015 on DMA coal */
1016 1006
1017/* Tx Rate-Scheduler Config fields */ 1007/* Tx Rate-Scheduler Config fields */
1018#define E1000_RTTBCNRC_RS_ENA 0x80000000 1008#define E1000_RTTBCNRC_RS_ENA 0x80000000
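The condensed comments in the e1000_defines.h hunks document classic mask/shift register fields; for example, the DMA-coalescing Rx threshold occupies bits 23:16 of DMACR. A small sketch of how such a pair is used to read a field and rewrite it without disturbing neighboring bits, using a made-up register value:

    #include <stdio.h>
    #include <stdint.h>

    #define E1000_DMACR_DMACTHR_MASK   0x00FF0000
    #define E1000_DMACR_DMACTHR_SHIFT  16

    int main(void)
    {
        uint32_t dmacr = 0x802A0000;    /* illustrative register contents */

        /* Field read: mask off the bits, then shift down. */
        uint32_t thr = (dmacr & E1000_DMACR_DMACTHR_MASK) >>
                       E1000_DMACR_DMACTHR_SHIFT;
        printf("rx threshold field = %u\n", thr);    /* 0x2A -> 42 */

        /* Field write: clear the field, then OR in the new value. */
        dmacr &= ~E1000_DMACR_DMACTHR_MASK;
        dmacr |= (57u << E1000_DMACR_DMACTHR_SHIFT) &
                 E1000_DMACR_DMACTHR_MASK;
        printf("dmacr = %#x\n", dmacr);
        return 0;
    }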
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d170f2d..89925e405849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,24 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 *
7 under the terms and conditions of the GNU General Public License, 7 * This program is distributed in the hope it will be useful, but WITHOUT
8 version 2, as published by the Free Software Foundation. 8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 9 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * more details.
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 *
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * You should have received a copy of the GNU General Public License along with
13 more details. 13 * this program; if not, see <http://www.gnu.org/licenses/>.
14 14 *
15 You should have received a copy of the GNU General Public License along with 15 * The full GNU General Public License is included in this distribution in
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * the file called "COPYING".
17 17 *
18 The full GNU General Public License is included in this distribution in 18 * Contact Information:
19 the file called "COPYING". 19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 Contact Information: 21 */
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 22
27#ifndef _E1000_HW_H_ 23#ifndef _E1000_HW_H_
28#define _E1000_HW_H_ 24#define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
 #include "e1000_mbx.h"
 
 struct e1000_mac_operations {
        s32 (*check_for_link)(struct e1000_hw *);
        s32 (*reset_hw)(struct e1000_hw *);
        s32 (*init_hw)(struct e1000_hw *);
        bool (*check_mng_mode)(struct e1000_hw *);
        s32 (*setup_physical_interface)(struct e1000_hw *);
        void (*rar_set)(struct e1000_hw *, u8 *, u32);
        s32 (*read_mac_addr)(struct e1000_hw *);
        s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
        s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
        void (*release_swfw_sync)(struct e1000_hw *, u16);
 #ifdef CONFIG_IGB_HWMON
        s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
 };
 
 struct e1000_phy_operations {
        s32 (*acquire)(struct e1000_hw *);
        s32 (*check_polarity)(struct e1000_hw *);
        s32 (*check_reset_block)(struct e1000_hw *);
        s32 (*force_speed_duplex)(struct e1000_hw *);
        s32 (*get_cfg_done)(struct e1000_hw *hw);
        s32 (*get_cable_length)(struct e1000_hw *);
        s32 (*get_phy_info)(struct e1000_hw *);
        s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
        void (*release)(struct e1000_hw *);
        s32 (*reset)(struct e1000_hw *);
        s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
        s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
        s32 (*write_reg)(struct e1000_hw *, u32, u16);
        s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
        s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
 };
 
 struct e1000_nvm_operations {
        s32 (*acquire)(struct e1000_hw *);
        s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
        void (*release)(struct e1000_hw *);
        s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
        s32 (*update)(struct e1000_hw *);
        s32 (*validate)(struct e1000_hw *);
        s32 (*valid_led_default)(struct e1000_hw *, u16 *);
 };
 
 #define E1000_MAX_SENSORS 3
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a170b90..337161f440dd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* e1000_i210
  * e1000_i211
@@ -100,7 +97,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
                return -E1000_ERR_NVM;
        }
 
-       return E1000_SUCCESS;
+       return 0;
 }
 
 /**
@@ -142,7 +139,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
        u32 swfw_sync;
        u32 swmask = mask;
        u32 fwmask = mask << 16;
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val = 0;
        s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
 
        while (i < timeout) {
@@ -187,7 +184,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
 {
        u32 swfw_sync;
 
-       while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+       while (igb_get_hw_semaphore_i210(hw))
                ; /* Empty */
 
        swfw_sync = rd32(E1000_SW_FW_SYNC);
@@ -210,7 +207,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
 static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
                                   u16 *data)
 {
-       s32 status = E1000_SUCCESS;
+       s32 status = 0;
        u16 i, count;
 
        /* We cannot hold synchronization semaphores for too long,
@@ -220,7 +217,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
        for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
                        E1000_EERD_EEWR_MAX_COUNT : (words - i);
-               if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+               if (!(hw->nvm.ops.acquire(hw))) {
                        status = igb_read_nvm_eerd(hw, offset, count,
                                                   data + i);
                        hw->nvm.ops.release(hw);
@@ -228,7 +225,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
                        status = E1000_ERR_SWFW_SYNC;
                }
 
-               if (status != E1000_SUCCESS)
+               if (status)
                        break;
        }
 
@@ -253,7 +250,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
        struct e1000_nvm_info *nvm = &hw->nvm;
        u32 i, k, eewr = 0;
        u32 attempts = 100000;
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val = 0;
 
        /* A check for invalid values: offset too large, too many words,
         * too many words for the offset, and not enough words.
@@ -275,13 +272,13 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
                for (k = 0; k < attempts; k++) {
                        if (E1000_NVM_RW_REG_DONE &
                            rd32(E1000_SRWR)) {
-                               ret_val = E1000_SUCCESS;
+                               ret_val = 0;
                                break;
                        }
                        udelay(5);
                }
 
-               if (ret_val != E1000_SUCCESS) {
+               if (ret_val) {
                        hw_dbg("Shadow RAM write EEWR timed out\n");
                        break;
                }
@@ -310,7 +307,7 @@ out:
 static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
                                    u16 *data)
 {
-       s32 status = E1000_SUCCESS;
+       s32 status = 0;
        u16 i, count;
 
        /* We cannot hold synchronization semaphores for too long,
@@ -320,7 +317,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
        for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
                        E1000_EERD_EEWR_MAX_COUNT : (words - i);
-               if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+               if (!(hw->nvm.ops.acquire(hw))) {
                        status = igb_write_nvm_srwr(hw, offset, count,
                                                    data + i);
                        hw->nvm.ops.release(hw);
@@ -328,7 +325,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
                        status = E1000_ERR_SWFW_SYNC;
                }
 
-               if (status != E1000_SUCCESS)
+               if (status)
                        break;
        }
 
@@ -367,12 +364,12 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
                                *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
                                hw_dbg("Read INVM Word 0x%02x = %x\n",
                                          address, *data);
-                               status = E1000_SUCCESS;
+                               status = 0;
                                break;
                        }
                }
        }
-       if (status != E1000_SUCCESS)
+       if (status)
                hw_dbg("Requested word 0x%02x not found in OTP\n", address);
        return status;
 }
@@ -388,7 +385,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
 static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
                               u16 words __always_unused, u16 *data)
 {
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val = 0;
 
        /* Only the MAC addr is required to be present in the iNVM */
        switch (offset) {
@@ -398,43 +395,44 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
                                                   &data[1]);
                ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
                                                   &data[2]);
-               if (ret_val != E1000_SUCCESS)
+               if (ret_val)
                        hw_dbg("MAC Addr not found in iNVM\n");
                break;
        case NVM_INIT_CTRL_2:
                ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-               if (ret_val != E1000_SUCCESS) {
+               if (ret_val) {
                        *data = NVM_INIT_CTRL_2_DEFAULT_I211;
-                       ret_val = E1000_SUCCESS;
+                       ret_val = 0;
                }
                break;
        case NVM_INIT_CTRL_4:
                ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-               if (ret_val != E1000_SUCCESS) {
+               if (ret_val) {
                        *data = NVM_INIT_CTRL_4_DEFAULT_I211;
-                       ret_val = E1000_SUCCESS;
+                       ret_val = 0;
                }
                break;
        case NVM_LED_1_CFG:
                ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-               if (ret_val != E1000_SUCCESS) {
+               if (ret_val) {
                        *data = NVM_LED_1_CFG_DEFAULT_I211;
-                       ret_val = E1000_SUCCESS;
+                       ret_val = 0;
                }
                break;
        case NVM_LED_0_2_CFG:
                ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-               if (ret_val != E1000_SUCCESS) {
+               if (ret_val) {
                        *data = NVM_LED_0_2_CFG_DEFAULT_I211;
-                       ret_val = E1000_SUCCESS;
+                       ret_val = 0;
                }
                break;
        case NVM_ID_LED_SETTINGS:
                ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-               if (ret_val != E1000_SUCCESS) {
+               if (ret_val) {
                        *data = ID_LED_RESERVED_FFFF;
-                       ret_val = E1000_SUCCESS;
+                       ret_val = 0;
                }
+               break;
        case NVM_SUB_DEV_ID:
                *data = hw->subsystem_device_id;
                break;
@@ -488,14 +486,14 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
                /* Check if we have first version location used */
                if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
                        version = 0;
-                       status = E1000_SUCCESS;
+                       status = 0;
                        break;
                }
                /* Check if we have second version location used */
                else if ((i == 1) &&
                         ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
                        version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
-                       status = E1000_SUCCESS;
+                       status = 0;
                        break;
                }
                /* Check if we have odd version location
@@ -506,7 +504,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
                          (i != 1))) {
                        version = (*next_record & E1000_INVM_VER_FIELD_TWO)
                                  >> 13;
-                       status = E1000_SUCCESS;
+                       status = 0;
                        break;
                }
                /* Check if we have even version location
@@ -515,12 +513,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
                else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
                         ((*record & 0x3) == 0)) {
                        version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
-                       status = E1000_SUCCESS;
+                       status = 0;
                        break;
                }
        }
 
-       if (status == E1000_SUCCESS) {
+       if (!status) {
                invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
                                        >> E1000_INVM_MAJOR_SHIFT;
                invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
@@ -533,7 +531,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
                /* Check if we have image type in first location used */
                if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
                        invm_ver->invm_img_type = 0;
-                       status = E1000_SUCCESS;
+                       status = 0;
                        break;
                }
                /* Check if we have image type in first location used */
@@ -542,7 +540,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
                         ((((*record & 0x3) != 0) && (i != 1)))) {
                        invm_ver->invm_img_type =
                                (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
-                       status = E1000_SUCCESS;
+                       status = 0;
                        break;
                }
        }
@@ -558,10 +556,10 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
 **/
 static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
 {
-       s32 status = E1000_SUCCESS;
+       s32 status = 0;
        s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
 
-       if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+       if (!(hw->nvm.ops.acquire(hw))) {
 
                /* Replace the read function with semaphore grabbing with
                 * the one that skips this for a while.
@@ -593,7 +591,7 @@ static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
 **/
 static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
 {
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val = 0;
        u16 checksum = 0;
        u16 i, nvm_data;
 
@@ -602,12 +600,12 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
         * EEPROM read fails
         */
        ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
-       if (ret_val != E1000_SUCCESS) {
+       if (ret_val) {
                hw_dbg("EEPROM read failed\n");
                goto out;
        }
 
-       if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+       if (!(hw->nvm.ops.acquire(hw))) {
                /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
                 * because we do not want to take the synchronization
                 * semaphores twice here.
@@ -625,7 +623,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
                checksum = (u16) NVM_SUM - checksum;
                ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
                                             &checksum);
-               if (ret_val != E1000_SUCCESS) {
+               if (ret_val) {
                        hw->nvm.ops.release(hw);
                        hw_dbg("NVM Write Error while updating checksum.\n");
                        goto out;
@@ -654,7 +652,7 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
        for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
                reg = rd32(E1000_EECD);
                if (reg & E1000_EECD_FLUDONE_I210) {
-                       ret_val = E1000_SUCCESS;
+                       ret_val = 0;
                        break;
                }
                udelay(5);
@@ -687,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
 **/
 static s32 igb_update_flash_i210(struct e1000_hw *hw)
 {
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val = 0;
        u32 flup;
 
        ret_val = igb_pool_flash_update_done_i210(hw);
@@ -700,7 +698,7 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
        wr32(E1000_EECD, flup);
 
        ret_val = igb_pool_flash_update_done_i210(hw);
-       if (ret_val == E1000_SUCCESS)
+       if (ret_val)
                hw_dbg("Flash update complete\n");
        else
                hw_dbg("Flash update time out\n");
@@ -753,7 +751,7 @@ out:
 static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
                                   u8 dev_addr, u16 *data, bool read)
 {
-       s32 ret_val = E1000_SUCCESS;
+       s32 ret_val = 0;
 
        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
        if (ret_val)
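
The E1000_SUCCESS conversions above all lean on the kernel convention that a zero s32 means success and any non-zero value is an error code, which is what lets call sites collapse to "if (ret_val)" and "if (!ret_val)". A minimal userspace sketch of that convention (the function and error value are illustrative, not taken from the driver):

#include <stdio.h>

#define ERR_SWFW_SYNC  (-13)    /* hypothetical error code; 0 means success */

/* Returns 0 on success, a negative error code otherwise. */
static int acquire_resource(int available)
{
        return available ? 0 : ERR_SWFW_SYNC;
}

int main(void)
{
        int ret_val = acquire_resource(1);

        if (ret_val)                    /* non-zero: an error happened */
                printf("error %d\n", ret_val);
        else                            /* zero: success, no macro needed */
                printf("acquired\n");
        return 0;
}
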
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99a9813..9f34976687ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404db81a..2a88595f956c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
  * The caller must have a packed mc_addr_list of multicast addresses.
  **/
 void igb_update_mc_addr_list(struct e1000_hw *hw,
                              u8 *mc_addr_list, u32 mc_addr_count)
 {
        u32 hash_value, hash_bit, hash_reg;
        int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                goto out;
 
        if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-               hw_dbg("Copper PHY and Auto Neg "
-                      "has not completed.\n");
+               hw_dbg("Copper PHY and Auto Neg has not completed.\n");
                goto out;
        }
 
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
        while (i < AUTO_READ_DONE_TIMEOUT) {
                if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
                i++;
        }
 
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
        }
 
        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
-               switch(hw->phy.media_type) {
+               switch (hw->phy.media_type) {
                case e1000_media_type_internal_serdes:
                        *data = ID_LED_DEFAULT_82575_SERDES;
                        break;
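
The msleep(1) to usleep_range(1000, 2000) conversions in this file follow the kernel's documented guidance that an msleep() of a few milliseconds can sleep far longer than requested on coarse-HZ systems, while usleep_range() takes an explicit min/max window. A userspace analog of the bounded-sleep poll loop, with the hardware flag simulated (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Userspace stand-in for the hardware "auto read done" bit. */
static bool auto_read_done(int iteration)
{
        return iteration >= 3;  /* pretend the bit sets on the 4th poll */
}

int main(void)
{
        /* Sleep ~1 ms per poll, mirroring the usleep_range(1000, 2000) idea. */
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
        int i = 0, timeout = 10;

        while (i < timeout) {
                if (auto_read_done(i))
                        break;
                nanosleep(&delay, NULL);
                i++;
        }
        printf(i < timeout ? "done after %d polls\n" : "timed out (%d)\n", i);
        return 0;
}
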
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba8ee3a..ea24961b0d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MAC_H_
 #define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b121771c31..162cc49345d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000_mbx.h"
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f5515e5a8..d20af6b2f581 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MBX_H_
 #define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf82919c65..e8280d0d7f02 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
        /* Loop to allow for up to whole page write of eeprom */
        while (widx < words) {
                u16 word_out = data[widx];
+
                word_out = (word_out >> 8) | (word_out << 8);
                igb_shift_out_eec_bits(hw, word_out, 16);
                widx++;
@@ -801,5 +798,4 @@ etrack_id:
                fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
                        | eeprom_verl;
        }
-       return;
 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b101170b17e..febc9cdb7391 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_NVM_H_
 #define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
 s32 igb_read_mac_addr(struct e1000_hw *hw);
 s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
 s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
                          u32 part_num_size);
 s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bbab7407..c1bb64d8366f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
        if (phy->autoneg_wait_to_complete) {
                ret_val = igb_wait_autoneg(hw);
                if (ret_val) {
-                       hw_dbg("Error while waiting for "
-                              "autoneg to complete\n");
+                       hw_dbg("Error while waiting for autoneg to complete\n");
                        goto out;
                }
        }
@@ -2208,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 void igb_power_up_phy_copper(struct e1000_hw *hw)
 {
        u16 mii_reg = 0;
-       u16 power_reg = 0;
 
        /* The PHY will retain its settings across a power down/up cycle */
        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
        mii_reg &= ~MII_CR_POWER_DOWN;
-       if (hw->phy.type == e1000_phy_i210) {
-               hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
-               power_reg &= ~GS40G_CS_POWER_DOWN;
-               hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
-       }
        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
 }
 
@@ -2231,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
 void igb_power_down_phy_copper(struct e1000_hw *hw)
 {
        u16 mii_reg = 0;
-       u16 power_reg = 0;
 
        /* The PHY will retain its settings across a power down/up cycle */
        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
        mii_reg |= MII_CR_POWER_DOWN;
-
-       /* i210 Phy requires an additional bit for power up/down */
-       if (hw->phy.type == e1000_phy_i210) {
-               hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
-               power_reg |= GS40G_CS_POWER_DOWN;
-               hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
-       }
        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-       msleep(1);
+       usleep_range(1000, 2000);
 }
 
 /**
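
With the i210-specific GS40G handling removed, both power helpers above reduce to a read-modify-write of the power-down bit in the MII control register. A userspace sketch of that bit manipulation (the register is simulated; the 0x0800 bit value is the customary MII_CR_POWER_DOWN assignment and is an assumption here, not taken from the diff):

#include <stdint.h>
#include <stdio.h>

#define MII_CR_POWER_DOWN 0x0800        /* assumed bit 11 of MII control */

static uint16_t phy_control = 0x1140;   /* simulated register contents */

int main(void)
{
        uint16_t mii_reg = phy_control;         /* read */
        mii_reg |= MII_CR_POWER_DOWN;           /* set the power-down bit */
        phy_control = mii_reg;                  /* write */
        printf("powered down: 0x%04x\n", phy_control);

        mii_reg = phy_control;                  /* read */
        mii_reg &= ~MII_CR_POWER_DOWN;          /* clear it to power up */
        phy_control = mii_reg;                  /* write */
        printf("powered up:   0x%04x\n", phy_control);
        return 0;
}
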
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c46a73..7af4ffab0285 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_PHY_H_
 #define _E1000_PHY_H_
@@ -154,7 +151,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
 #define GS40G_MAC_LB                   0x4140
 #define GS40G_MAC_SPEED_1G             0X0006
 #define GS40G_COPPER_SPEC              0x0010
-#define GS40G_CS_POWER_DOWN            0x0002
 #define GS40G_LINE_LB                  0x4000
 
 /* SFP modules ID memory locations */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e848e1..1cc4b1a7e597 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_REGS_H_
 #define _E1000_REGS_H_
@@ -195,6 +192,10 @@
                                                : (0x0E038 + ((_n) * 0x40)))
 #define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
                                                : (0x0E03C + ((_n) * 0x40)))
+
+#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS    0x03404  /* Tx Packet Buffer Size - RW */
+
 #define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
 #define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
 #define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
@@ -301,9 +302,9 @@
 #define E1000_RA2      0x054E0  /* 2nd half of Rx address array - RW Array */
 #define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
                         (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                         (0x054E4 + ((_i - 16) * 8)))
 #define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
 #define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
 #define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
@@ -358,8 +359,7 @@
 #define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
 #define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
 #define E1000_DVMOLR(_n)       (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
-                                                       * Filter - RW */
+#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
 #define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
 
 struct e1000_hw;
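
Indexed macros such as E1000_RAL()/E1000_RAH() above hide two discontiguous register banks behind a single index, while the newly added E1000_RXPBS/E1000_TXPBS are plain offsets. A small self-check of the split-bank arithmetic (offsets copied from the defines; the assertions are mine):

#include <assert.h>
#include <stdio.h>

/* Mirrors E1000_RAL(): entries 0-15 live at 0x05400, later ones at 0x054E0. */
#define RAL(i) (((i) <= 15) ? (0x05400 + ((i) * 8)) : (0x054E0 + (((i) - 16) * 8)))

int main(void)
{
        assert(RAL(0)  == 0x05400);     /* first bank */
        assert(RAL(15) == 0x05478);     /* last entry of the first bank */
        assert(RAL(16) == 0x054E0);     /* second bank starts fresh */
        printf("RAL(17) = 0x%05X\n", RAL(17));
        return 0;
}
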
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 27130065d92a..06102d1f7c03 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* Linux PRO/1000 Ethernet Driver main header file */
 
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
+
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570acbeea8..c737d1f40838 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* ethtool support for igb */
 
@@ -144,6 +141,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
 	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
 	u32 status;
+	u32 speed;
 
 	status = rd32(E1000_STATUS);
 	if (hw->phy.media_type == e1000_media_type_copper) {
@@ -218,13 +216,13 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	if (status & E1000_STATUS_LU) {
 		if ((status & E1000_STATUS_2P5_SKU) &&
 		    !(status & E1000_STATUS_2P5_SKU_OVER)) {
-			ecmd->speed = SPEED_2500;
+			speed = SPEED_2500;
 		} else if (status & E1000_STATUS_SPEED_1000) {
-			ecmd->speed = SPEED_1000;
+			speed = SPEED_1000;
 		} else if (status & E1000_STATUS_SPEED_100) {
-			ecmd->speed = SPEED_100;
+			speed = SPEED_100;
 		} else {
-			ecmd->speed = SPEED_10;
+			speed = SPEED_10;
 		}
 		if ((status & E1000_STATUS_FD) ||
 		    hw->phy.media_type != e1000_media_type_copper)
@@ -232,9 +230,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ecmd->speed = -1;
-		ecmd->duplex = -1;
+		speed = SPEED_UNKNOWN;
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
+	ethtool_cmd_speed_set(ecmd, speed);
 	if ((hw->phy.media_type == e1000_media_type_fiber) ||
 	    hw->mac.autoneg)
 		ecmd->autoneg = AUTONEG_ENABLE;
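The hunk above stops writing ecmd->speed directly and routes the value through ethtool_cmd_speed_set(), which splits a 32-bit speed across the legacy u16 speed/speed_hi fields so values such as SPEED_UNKNOWN and 2500 Mb/s survive the round trip. A minimal sketch of the pattern, assuming only the standard <linux/ethtool.h> helper (report_speed() is a made-up name for illustration):

    #include <linux/ethtool.h>

    /* Store a 32-bit link speed in a legacy ethtool_cmd; the helper
     * splits the value across the u16 speed and speed_hi fields.
     */
    static void report_speed(struct ethtool_cmd *ecmd, u32 speed)
    {
            ethtool_cmd_speed_set(ecmd, speed);
    }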
@@ -286,7 +285,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	}
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->mac.autoneg = 1;
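The msleep(1) → usleep_range(1000, 2000) conversions in this patch follow the kernel's timers-howto guidance: msleep() of a few milliseconds is jiffy-based and can oversleep by an order of magnitude on low-HZ kernels, while usleep_range() is hrtimer-based and gives the scheduler a window to coalesce wakeups. A sketch of the wait-loop pattern, with names taken from the hunk above:

    #include <linux/delay.h>

    /* Wait for the resetting bit to be free, sleeping 1-2 ms per retry. */
    while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
            usleep_range(1000, 2000);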
@@ -399,7 +398,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
 	adapter->fc_autoneg = pause->autoneg;
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
 		hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +885,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 	}
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1059,8 @@ static struct igb_reg_test reg_test_i350[] = {
 	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_TDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
 	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
 			   0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1102,8 @@ static struct igb_reg_test reg_test_82580[] = {
 	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_TDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
 	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
 			   0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1131,10 @@ static struct igb_reg_test reg_test_82576[] = {
 	{ E1000_RDBAH(4),  0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RDLEN(4),  0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
 	/* Enable all RX queues before testing. */
-	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
-	{ E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+	  E1000_RXDCTL_QUEUE_ENABLE },
+	{ E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+	  E1000_RXDCTL_QUEUE_ENABLE },
 	/* RDH is read-only for 82576, only test RDT. */
 	{ E1000_RDT(0),	   0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RDT(4),	   0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1150,14 @@ static struct igb_reg_test reg_test_82576[] = {
 	{ E1000_TDBAH(4),  0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_TDLEN(4),  0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
 	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
 	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RA,	   0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RA,	   0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
 	{ E1000_RA2,	   0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RA2,	   0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
-	{ E1000_MTA,	   0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_MTA,	   0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ 0, 0, 0, 0 }
 };
 
@@ -1170,7 +1171,8 @@ static struct igb_reg_test reg_test_82575[] = {
 	{ E1000_RDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
 	/* Enable all four RX queues before testing. */
-	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+	  E1000_RXDCTL_QUEUE_ENABLE },
 	/* RDH is read-only for 82575, only test RDT. */
 	{ E1000_RDT(0),	   0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1198,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 pat, val;
-	static const u32 _test[] =
-		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	static const u32 _test[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
 	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
 		wr32(reg, (_test[pat] & write));
 		val = rd32(reg) & mask;
@@ -1206,11 +1208,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1206 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", 1208 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1207 reg, val, (_test[pat] & write & mask)); 1209 reg, val, (_test[pat] & write & mask));
1208 *data = reg; 1210 *data = reg;
1209 return 1; 1211 return true;
1210 } 1212 }
1211 } 1213 }
1212 1214
1213 return 0; 1215 return false;
1214} 1216}
1215 1217
1216static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, 1218static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1220,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 val;
+
 	wr32(reg, write & mask);
 	val = rd32(reg);
 	if ((write & mask) != (val & mask)) {
 		dev_err(&adapter->pdev->dev,
-			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
-			(val & mask), (write & mask));
+			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+			reg, (val & mask), (write & mask));
 		*data = reg;
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
 #define REG_PATTERN_TEST(reg, mask, write) \
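reg_pattern_test() and reg_set_and_check() are declared bool, so the cleanup above returns true/false rather than 1/0. A tiny sketch of the convention (reg_mismatch() is a hypothetical helper, not igb code):

    #include <linux/types.h>

    /* A bool-returning predicate reports with true/false, matching
     * its declared type instead of integer literals.
     */
    static bool reg_mismatch(u32 wrote, u32 read, u32 mask)
    {
            return (wrote & mask) != (read & mask);
    }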
@@ -1387,14 +1390,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	/* Hook up test interrupt handler just for this test */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		if (request_irq(adapter->msix_entries[0].vector,
-		                igb_test_intr, 0, netdev->name, adapter)) {
+				igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
 	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		shared_int = false;
 		if (request_irq(irq,
-		                igb_test_intr, 0, netdev->name, adapter)) {
+				igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
@@ -1412,7 +1415,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	/* Disable all the interrupts */
 	wr32(E1000_IMC, ~0);
 	wrfl();
-	msleep(10);
+	usleep_range(10000, 11000);
 
 	/* Define all writable bits for ICS */
 	switch (hw->mac.type) {
@@ -1459,7 +1462,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		wr32(E1000_IMC, mask);
 		wr32(E1000_ICS, mask);
 		wrfl();
-		msleep(10);
+		usleep_range(10000, 11000);
 
 		if (adapter->test_icr & mask) {
 			*data = 3;
@@ -1481,7 +1484,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		wr32(E1000_IMS, mask);
 		wr32(E1000_ICS, mask);
 		wrfl();
-		msleep(10);
+		usleep_range(10000, 11000);
 
 		if (!(adapter->test_icr & mask)) {
 			*data = 4;
@@ -1503,7 +1506,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		wr32(E1000_IMC, ~mask);
 		wr32(E1000_ICS, ~mask);
 		wrfl();
-		msleep(10);
+		usleep_range(10000, 11000);
 
 		if (adapter->test_icr & mask) {
 			*data = 5;
@@ -1515,7 +1518,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	/* Disable all the interrupts */
 	wr32(E1000_IMC, ~0);
 	wrfl();
-	msleep(10);
+	usleep_range(10000, 11000);
 
 	/* Unhook test interrupt handler */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1664,8 +1667,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
-	    (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
-
+	    (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+	    (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
 		/* Enable DH89xxCC MPHY for near end loopback */
 		reg = rd32(E1000_MPHY_ADDR_CTL);
 		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
@@ -1949,6 +1952,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
+
 		hw->mac.serdes_has_link = false;
 
 		/* On some blade server designs, link establishment
@@ -2413,9 +2417,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
 	switch (cmd->flow_type) {
 	case TCP_V4_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case UDP_V4_FLOW:
 		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case SCTP_V4_FLOW:
 	case AH_ESP_V4_FLOW:
 	case AH_V4_FLOW:
@@ -2425,9 +2431,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
 		break;
 	case TCP_V6_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case UDP_V6_FLOW:
 		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case SCTP_V6_FLOW:
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
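The added /* Fall through */ comments document that each case is meant to cascade into the next: TCP sets the L4 hash bits and then execution continues into the UDP and SCTP handling. This predates the kernel's fallthrough; pseudo-keyword, so the comment is what tells gcc and static checkers the missing break is intentional. A condensed sketch of the annotated-cascade pattern (the exact bits set per case are abbreviated here):

    switch (cmd->flow_type) {
    case TCP_V4_FLOW:
            cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
            /* Fall through */
    case UDP_V4_FLOW:
    case SCTP_V4_FLOW:
            cmd->data |= RXH_IP_SRC | RXH_IP_DST;
            break;
    }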
@@ -2730,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 status = E1000_SUCCESS;
+	u32 status = 0;
 	u16 sff8472_rev, addr_mode;
 	bool page_swap = false;
 
@@ -2740,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
 
 	/* Check whether we support SFF-8472 or not */
 	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
-	if (status != E1000_SUCCESS)
+	if (status)
 		return -EIO;
 
 	/* addressing mode is not supported */
 	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
-	if (status != E1000_SUCCESS)
+	if (status)
 		return -EIO;
 
 	/* addressing mode is not supported */
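Dropping E1000_SUCCESS in favor of plain 0 matches the kernel convention that functions return 0 on success and non-zero (usually a negative errno) on failure, which lets callers test the status directly, as the hunk above does:

    status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
    if (status)
            return -EIO;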
@@ -2772,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 status = E1000_SUCCESS;
+	u32 status = 0;
 	u16 *dataword;
 	u16 first_word, last_word;
 	int i = 0;
@@ -2791,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
 	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
 	for (i = 0; i < last_word - first_word + 1; i++) {
 		status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
-		if (status != E1000_SUCCESS) {
+		if (status) {
 			/* Error occurred while reading module */
 			kfree(dataword);
 			return -EIO;
@@ -2824,7 +2832,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
 	return IGB_RETA_SIZE;
 }
 
-static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int i;
@@ -2870,7 +2878,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
 	}
 }
 
-static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+			const u8 *key)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -3019,8 +3028,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
 	.get_module_info	= igb_get_module_info,
 	.get_module_eeprom	= igb_get_module_eeprom,
 	.get_rxfh_indir_size	= igb_get_rxfh_indir_size,
-	.get_rxfh_indir		= igb_get_rxfh_indir,
-	.set_rxfh_indir		= igb_set_rxfh_indir,
+	.get_rxfh		= igb_get_rxfh,
+	.set_rxfh		= igb_set_rxfh,
 	.get_channels		= igb_get_channels,
 	.set_channels		= igb_set_channels,
 	.begin			= igb_ethtool_begin,
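The get_rxfh_indir/set_rxfh_indir hooks were folded into the combined get_rxfh/set_rxfh entry points in this cycle; the new hooks carry both the RSS indirection table and the hash key, and a driver that only exposes the table (as igb does here) simply ignores the key pointer. A sketch of the 3.16-era signatures, which later kernels extended further (example_ names are illustrative):

    /* Either pointer may be NULL when the caller wants only one of them. */
    static int example_get_rxfh(struct net_device *dev, u32 *indir, u8 *key);
    static int example_set_rxfh(struct net_device *dev, const u32 *indir,
                                const u8 *key);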
@@ -3029,5 +3038,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
 
 void igb_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+	netdev->ethtool_ops = &igb_ethtool_ops;
 }
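The SET_ETHTOOL_OPS() macro was removed tree-wide in this merge window; since it only wrapped a pointer store, drivers now assign the ops directly:

    netdev->ethtool_ops = &igb_ethtool_ops;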
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67acf96..44b6a68f1af7 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "igb.h"
 #include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8440fa..f145adbb55ac 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
 	[board_82575] = &e1000_82575_info,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
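DEFINE_PCI_DEVICE_TABLE() was deprecated in favor of an open-coded const struct pci_device_id array, which is what this hunk switches to. A self-contained sketch of the preferred form (the device ID and table name are illustrative, not taken from igb):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id example_pci_tbl[] = {
            { PCI_VDEVICE(INTEL, 0x10c9) },  /* illustrative device ID */
            { 0, }  /* all-zero entry terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);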
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
 
-void igb_reset(struct igb_adapter *);
 static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
-					  struct rtnl_link_stats64 *stats);
+						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+			  netdev_features_t features);
 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
 static void igb_restore_vlan(struct igb_adapter *);
@@ -172,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 			       int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
 				   bool setting);
 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
 static void igb_netpoll(struct net_device *);
 #endif
 #ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
-                 "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
 #endif /* CONFIG_PCI_IOV */
 
 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
 	/* Print netdevice Info */
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
-		pr_info("Device Name state trans_start "
-			"last_rx\n");
+		pr_info("Device Name state trans_start last_rx\n");
 		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
 			netdev->state, netdev->trans_start, netdev->last_rx);
 	}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
438 pr_info("------------------------------------\n"); 433 pr_info("------------------------------------\n");
439 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); 434 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
440 pr_info("------------------------------------\n"); 435 pr_info("------------------------------------\n");
441 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] " 436 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
442 "[bi->dma ] leng ntw timestamp "
443 "bi->skb\n");
444 437
445 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 438 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
446 const char *next_desc; 439 const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
 		else
 			next_desc = "";
 
-		pr_info("T [0x%03X] %016llX %016llX %016llX"
-			" %04X %p %016llX %p%s\n", i,
-			le64_to_cpu(u0->a),
+		pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+			i, le64_to_cpu(u0->a),
 			le64_to_cpu(u0->b),
 			(u64)dma_unmap_addr(buffer_info, dma),
 			dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
519 pr_info("------------------------------------\n"); 511 pr_info("------------------------------------\n");
520 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); 512 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
521 pr_info("------------------------------------\n"); 513 pr_info("------------------------------------\n");
522 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] " 514 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
523 "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); 515 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
524 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
525 "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
526 516
527 for (i = 0; i < rx_ring->count; i++) { 517 for (i = 0; i < rx_ring->count; i++) {
528 const char *next_desc; 518 const char *next_desc;
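The dump headers above are rejoined into single pr_info() calls because checkpatch asks that user-visible strings be kept on one line, even past 80 columns, so they remain greppable. When a call still has to wrap, the cleanup splits the argument list rather than the format string:

    /* Keep the message whole; wrap only after the format string. */
    pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
            i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
            (u64)dma_unmap_addr(buffer_info, dma),
            dma_unmap_len(buffer_info, len),
            buffer_info->next_to_watch,
            (u64)buffer_info->time_stamp, buffer_info->skb, next_desc);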
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-	return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+	return !!(i2cctl & E1000_I2C_DATA_IN);
 }
 
 /**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-	return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+	return !!(i2cctl & E1000_I2C_CLK_IN);
 }
 
 static const struct i2c_algo_bit_data igb_i2c_algo = {
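return ((x & FLAG) != 0) becomes return !!(x & FLAG): the double negation is the stock kernel idiom for collapsing a masked value into exactly 0 or 1. A minimal sketch (line_state() is a hypothetical helper):

    static int line_state(u32 i2cctl, u32 bit)
    {
            return !!(i2cctl & bit);  /* yields 0 or 1, never the raw mask */
    }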
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 static int __init igb_init_module(void)
 {
 	int ret;
+
 	pr_info("%s - version %s\n",
 		igb_driver_string, igb_driver_version);
-
 	pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 			adapter->rx_ring[i]->reg_idx = rbase_offset +
 						       Q_IDX_82576(i);
 		}
+		/* Fall through */
 	case e1000_82575:
 	case e1000_82580:
 	case e1000_i350:
 	case e1000_i354:
 	case e1000_i210:
 	case e1000_i211:
+		/* Fall through */
 	default:
 		for (; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
 	if (adapter->hw.mac.type >= e1000_82576)
 		set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
 
-	/*
-	 * On i350, i354, i210, and i211, loopback VLAN packets
+	/* On i350, i354, i210, and i211, loopback VLAN packets
 	 * have the tag byte-swapped.
 	 */
 	if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
 	for (; v_idx < q_vectors; v_idx++) {
 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
 		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
 					 tqpv, txr_idx, rqpv, rxr_idx);
 
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	 */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 regval = rd32(E1000_EIAM);
+
 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
 		wr32(E1000_EIMC, adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	wrfl();
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		int i;
+
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			synchronize_irq(adapter->msix_entries[i].vector);
 	} else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
 		u32 regval = rd32(E1000_EIAC);
+
 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
 	/* notify VFs that reset has been completed */
 	if (adapter->vfs_allocated_count) {
 		u32 reg_data = rd32(E1000_CTRL_EXT);
+
 		reg_data |= E1000_CTRL_EXT_PFRSTD;
 		wr32(E1000_CTRL_EXT, reg_data);
 	}
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
 	wr32(E1000_TCTL, tctl);
 	/* flush both disables and wait for them to finish */
 	wrfl();
-	msleep(10);
+	usleep_range(10000, 11000);
 
 	igb_irq_disable(adapter);
 
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 {
 	WARN_ON(in_interrupt());
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 	igb_down(adapter);
 	igb_up(adapter);
 	clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
 	/* disable receive for all VFs and wait one second */
 	if (adapter->vfs_allocated_count) {
 		int i;
+
 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
 			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
 
@@ -2087,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
 	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
 	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
-	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
+	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
 	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
 	.ndo_get_vf_config	= igb_ndo_get_vf_config,
 #ifdef CONFIG_NET_POLL_CONTROLLER
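.ndo_set_vf_tx_rate was replaced by .ndo_set_vf_rate in this merge window; the new hook takes both a minimum and a maximum TX rate, which is why igb_ndo_set_vf_bw's prototype earlier in this diff grew to four arguments. A sketch of the expected shape, assuming hardware with no minimum-rate support (example_set_vf_bw is an illustrative name):

    static int example_set_vf_bw(struct net_device *netdev, int vf,
                                 int min_tx_rate, int max_tx_rate)
    {
            if (min_tx_rate)
                    return -EINVAL;  /* no min-rate support in this hardware */

            /* program max_tx_rate into the per-VF rate limiter here */
            return 0;
    }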
@@ -2142,7 +2139,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
 		}
 		break;
 	}
-	return;
 }
 
 /**
@@ -2203,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter)
  **/
 static s32 igb_init_i2c(struct igb_adapter *adapter)
 {
-	s32 status = E1000_SUCCESS;
+	s32 status = 0;
 
 	/* I2C interface supported on i350 devices */
 	if (adapter->hw.mac.type != e1000_i350)
-		return E1000_SUCCESS;
+		return 0;
 
 	/* Initialize the i2c bus which is controlled by the registers.
 	 * This bus will use the i2c_algo_bit structue that implements
@@ -2437,6 +2433,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* get firmware version for ethtool -i */
 	igb_set_fw_version(adapter);
 
+	/* configure RXPBSIZE and TXPBSIZE */
+	if (hw->mac.type == e1000_i210) {
+		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+	}
+
 	setup_timer(&adapter->watchdog_timer, igb_watchdog,
 		    (unsigned long) adapter);
 	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2529,7 +2531,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	/* let the f/w know that the h/w is now under the control of the
-	 * driver. */
+	 * driver.
+	 */
 	igb_get_hw_control(adapter);
 
 	strcpy(netdev->name, "eth%d");
@@ -3077,6 +3080,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
 	/* notify VFs that reset has been completed */
 	if (adapter->vfs_allocated_count) {
 		u32 reg_data = rd32(E1000_CTRL_EXT);
+
 		reg_data |= E1000_CTRL_EXT_PFRSTD;
 		wr32(E1000_CTRL_EXT, reg_data);
 	}
@@ -3248,7 +3252,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
  * Configure a transmit ring after a reset.
  **/
 void igb_configure_tx_ring(struct igb_adapter *adapter,
-                           struct igb_ring *ring)
+			   struct igb_ring *ring)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 txdctl = 0;
@@ -3389,7 +3393,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 
 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
 		for (j = 0; j < IGB_RETA_SIZE; j++)
-			adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+			adapter->rss_indir_tbl[j] =
+			(j * num_rx_queues) / IGB_RETA_SIZE;
 		adapter->rss_indir_tbl_init = num_rx_queues;
 	}
 	igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3435,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 	if (hw->mac.type > e1000_82575) {
 		/* Set the default pool for the PF's first queue */
 		u32 vtctl = rd32(E1000_VT_CTL);
+
 		vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
 			   E1000_VT_CTL_DISABLE_DEF_POOL);
 		vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3517,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
 }
 
 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                   int vfn)
+				   int vfn)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 vmolr;
@@ -4058,7 +4064,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
 	switch (hw->mac.type) {
 	case e1000_82576:
 	case e1000_i350:
-		if (!(wvbr = rd32(E1000_WVBR)))
+		wvbr = rd32(E1000_WVBR);
+		if (!wvbr)
 			return;
 		break;
 	default:
@@ -4077,7 +4084,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
 	if (!adapter->wvbr)
 		return;
 
-	for(j = 0; j < adapter->vfs_allocated_count; j++) {
+	for (j = 0; j < adapter->vfs_allocated_count; j++) {
 		if (adapter->wvbr & (1 << j) ||
 		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
 			dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4216,15 @@ static void igb_watchdog_task(struct work_struct *work)
 
 	if (!netif_carrier_ok(netdev)) {
 		u32 ctrl;
+
 		hw->mac.ops.get_speed_and_duplex(hw,
 						 &adapter->link_speed,
 						 &adapter->link_duplex);
 
 		ctrl = rd32(E1000_CTRL);
 		/* Links status message must follow this format */
-		printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
-		       "Duplex, Flow Control: %s\n",
+		netdev_info(netdev,
+			    "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
 		       netdev->name,
 		       adapter->link_speed,
 		       adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4250,8 @@ static void igb_watchdog_task(struct work_struct *work)
 
 		/* check for thermal sensor event */
 		if (igb_thermal_sensor_event(hw,
-		    E1000_THSTAT_LINK_THROTTLE)) {
-			netdev_info(netdev, "The network adapter link "
-					    "speed was downshifted because it "
-					    "overheated\n");
-		}
+		    E1000_THSTAT_LINK_THROTTLE))
+			netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
 
 		/* adjust timeout factor according to speed/duplex */
 		adapter->tx_timeout_factor = 1;
@@ -4277,12 +4282,11 @@ static void igb_watchdog_task(struct work_struct *work)
 		/* check for thermal sensor event */
 		if (igb_thermal_sensor_event(hw,
 		    E1000_THSTAT_PWR_DOWN)) {
-			netdev_err(netdev, "The network adapter was "
-				   "stopped because it overheated\n");
+			netdev_err(netdev, "The network adapter was stopped because it overheated\n");
 		}
 
 		/* Links status message must follow this format */
-		printk(KERN_INFO "igb: %s NIC Link is Down\n",
+		netdev_info(netdev, "igb: %s NIC Link is Down\n",
 		       netdev->name);
 		netif_carrier_off(netdev);
 
@@ -4344,6 +4348,7 @@ static void igb_watchdog_task(struct work_struct *work)
 	/* Cause software interrupt to ensure Rx ring is cleaned */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 eics = 0;
+
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			eics |= adapter->q_vector[i]->eims_value;
 		wr32(E1000_EICS, eics);
@@ -4483,13 +4488,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
 	case low_latency:  /* 50 usec aka 20000 ints/s */
 		if (bytes > 10000) {
 			/* this if handles the TSO accounting */
-			if (bytes/packets > 8000) {
+			if (bytes/packets > 8000)
 				itrval = bulk_latency;
-			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
+			else if ((packets < 10) || ((bytes/packets) > 1200))
 				itrval = bulk_latency;
-			} else if ((packets > 35)) {
+			else if ((packets > 35))
 				itrval = lowest_latency;
-			}
 		} else if (bytes/packets > 2000) {
 			itrval = bulk_latency;
 		} else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4679,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 		return;
 	} else {
 		u8 l4_hdr = 0;
+
 		switch (first->protocol) {
 		case htons(ETH_P_IP):
 			vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4967,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	 */
 	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
 		unsigned short f;
+
 		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 	} else {
@@ -5140,7 +5146,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
5140 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 5146 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5141 5147
5142 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 5148 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
5143 msleep(1); 5149 usleep_range(1000, 2000);
5144 5150
5145 /* igb_down has a dependency on max_frame_size */ 5151 /* igb_down has a dependency on max_frame_size */
5146 adapter->max_frame_size = max_frame; 5152 adapter->max_frame_size = max_frame;
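The msleep(1) → usleep_range(1000, 2000) swap follows Documentation/timers/timers-howto.txt: msleep() of a few milliseconds rounds up to jiffies and can oversleep badly (up to roughly 20 ms at HZ=100), while usleep_range() is hrtimer-based and the min/max pair gives the scheduler a coalescing window. The busy-wait now reads (kernel context assumed):

/* Spin until we win the RESETTING bit; sleep 1-2 ms between attempts
 * rather than msleep(1), which may not return for a full jiffy or more.
 */
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
        usleep_range(1000, 2000);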
@@ -5621,6 +5627,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5621 vmolr |= E1000_VMOLR_MPME; 5627 vmolr |= E1000_VMOLR_MPME;
5622 } else if (vf_data->num_vf_mc_hashes) { 5628 } else if (vf_data->num_vf_mc_hashes) {
5623 int j; 5629 int j;
5630
5624 vmolr |= E1000_VMOLR_ROMPE; 5631 vmolr |= E1000_VMOLR_ROMPE;
5625 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 5632 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5626 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 5633 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5679,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5672 5679
5673 for (i = 0; i < adapter->vfs_allocated_count; i++) { 5680 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5674 u32 vmolr = rd32(E1000_VMOLR(i)); 5681 u32 vmolr = rd32(E1000_VMOLR(i));
5682
5675 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 5683 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5676 5684
5677 vf_data = &adapter->vf_data[i]; 5685 vf_data = &adapter->vf_data[i];
@@ -5770,6 +5778,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5770 5778
5771 if (!adapter->vf_data[vf].vlans_enabled) { 5779 if (!adapter->vf_data[vf].vlans_enabled) {
5772 u32 size; 5780 u32 size;
5781
5773 reg = rd32(E1000_VMOLR(vf)); 5782 reg = rd32(E1000_VMOLR(vf));
5774 size = reg & E1000_VMOLR_RLPML_MASK; 5783 size = reg & E1000_VMOLR_RLPML_MASK;
5775 size += 4; 5784 size += 4;
@@ -5798,6 +5807,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5798 adapter->vf_data[vf].vlans_enabled--; 5807 adapter->vf_data[vf].vlans_enabled--;
5799 if (!adapter->vf_data[vf].vlans_enabled) { 5808 if (!adapter->vf_data[vf].vlans_enabled) {
5800 u32 size; 5809 u32 size;
5810
5801 reg = rd32(E1000_VMOLR(vf)); 5811 reg = rd32(E1000_VMOLR(vf));
5802 size = reg & E1000_VMOLR_RLPML_MASK; 5812 size = reg & E1000_VMOLR_RLPML_MASK;
5803 size -= 4; 5813 size -= 4;
@@ -5902,8 +5912,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5902 */ 5912 */
5903 if (!add && (adapter->netdev->flags & IFF_PROMISC)) { 5913 if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
5904 u32 vlvf, bits; 5914 u32 vlvf, bits;
5905
5906 int regndx = igb_find_vlvf_entry(adapter, vid); 5915 int regndx = igb_find_vlvf_entry(adapter, vid);
5916
5907 if (regndx < 0) 5917 if (regndx < 0)
5908 goto out; 5918 goto out;
5909 /* See if any other pools are set for this VLAN filter 5919 /* See if any other pools are set for this VLAN filter
@@ -6494,7 +6504,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6494 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 6504 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6495 6505
6496 /* transfer page from old buffer to new buffer */ 6506 /* transfer page from old buffer to new buffer */
6497 memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer)); 6507 *new_buff = *old_buff;
6498 6508
6499 /* sync the buffer for use by the device */ 6509 /* sync the buffer for use by the device */
6500 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, 6510 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
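Replacing memcpy() with a plain struct assignment is behaviour-preserving here: both copy sizeof(struct igb_rx_buffer) bytes, but the assignment is type-checked and has no size argument to get wrong. A standalone illustration, with struct buf standing in for igb_rx_buffer:

#include <string.h>

struct buf {
        void *page;
        unsigned long dma;
        unsigned int page_offset;
};

static void copy_with_memcpy(struct buf *dst, const struct buf *src)
{
        memcpy(dst, src, sizeof(*dst));         /* works, but untyped */
}

static void copy_with_assignment(struct buf *dst, const struct buf *src)
{
        *dst = *src;    /* same copy, checked by the compiler */
}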
@@ -6963,6 +6973,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
6963 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6973 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
6964 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6974 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6965 u16 vid; 6975 u16 vid;
6976
6966 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && 6977 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6967 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) 6978 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6968 vid = be16_to_cpu(rx_desc->wb.upper.vlan); 6979 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7062,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7051 if (cleaned_count) 7062 if (cleaned_count)
7052 igb_alloc_rx_buffers(rx_ring, cleaned_count); 7063 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7053 7064
7054 return (total_packets < budget); 7065 return total_packets < budget;
7055} 7066}
7056 7067
7057static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, 7068static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7183,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7172 break; 7183 break;
7173 case SIOCGMIIREG: 7184 case SIOCGMIIREG:
7174 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 7185 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
7175 &data->val_out)) 7186 &data->val_out))
7176 return -EIO; 7187 return -EIO;
7177 break; 7188 break;
7178 case SIOCSMIIREG: 7189 case SIOCSMIIREG:
@@ -7873,7 +7884,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7873 } 7884 }
7874} 7885}
7875 7886
7876static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 7887static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
7888 int min_tx_rate, int max_tx_rate)
7877{ 7889{
7878 struct igb_adapter *adapter = netdev_priv(netdev); 7890 struct igb_adapter *adapter = netdev_priv(netdev);
7879 struct e1000_hw *hw = &adapter->hw; 7891 struct e1000_hw *hw = &adapter->hw;
@@ -7882,15 +7894,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7882 if (hw->mac.type != e1000_82576) 7894 if (hw->mac.type != e1000_82576)
7883 return -EOPNOTSUPP; 7895 return -EOPNOTSUPP;
7884 7896
7897 if (min_tx_rate)
7898 return -EINVAL;
7899
7885 actual_link_speed = igb_link_mbps(adapter->link_speed); 7900 actual_link_speed = igb_link_mbps(adapter->link_speed);
7886 if ((vf >= adapter->vfs_allocated_count) || 7901 if ((vf >= adapter->vfs_allocated_count) ||
7887 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || 7902 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
7888 (tx_rate < 0) || (tx_rate > actual_link_speed)) 7903 (max_tx_rate < 0) ||
7904 (max_tx_rate > actual_link_speed))
7889 return -EINVAL; 7905 return -EINVAL;
7890 7906
7891 adapter->vf_rate_link_speed = actual_link_speed; 7907 adapter->vf_rate_link_speed = actual_link_speed;
7892 adapter->vf_data[vf].tx_rate = (u16)tx_rate; 7908 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
7893 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); 7909 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
7894 7910
7895 return 0; 7911 return 0;
7896} 7912}
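This hunk adapts igb to the new ndo callback that carries both a minimum and a maximum VF transmit rate; 82576 hardware can only cap bandwidth, so any nonzero min_tx_rate is rejected up front. A standalone sketch of that validation order (illustrative names, not driver code):

#include <errno.h>
#include <stdbool.h>

/* Returns 0 if the requested VF rates are acceptable for hardware that
 * supports only a transmit cap, mirroring the checks in the hunk above.
 */
static int check_vf_rate_args(int vf, int vf_count, bool link_up,
                              int min_tx_rate, int max_tx_rate,
                              int link_speed_mbps)
{
        if (min_tx_rate)
                return -EINVAL;         /* no minimum-rate guarantee */
        if (vf >= vf_count || !link_up)
                return -EINVAL;
        if (max_tx_rate < 0 || max_tx_rate > link_speed_mbps)
                return -EINVAL;         /* cap must fit the actual link */
        return 0;
}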
@@ -7919,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
7919 wr32(reg_offset, reg_val); 7935 wr32(reg_offset, reg_val);
7920 7936
7921 adapter->vf_data[vf].spoofchk_enabled = setting; 7937 adapter->vf_data[vf].spoofchk_enabled = setting;
7922 return E1000_SUCCESS; 7938 return 0;
7923} 7939}
7924 7940
7925static int igb_ndo_get_vf_config(struct net_device *netdev, 7941static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7930,7 +7946,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
7930 return -EINVAL; 7946 return -EINVAL;
7931 ivi->vf = vf; 7947 ivi->vf = vf;
7932 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); 7948 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
7933 ivi->tx_rate = adapter->vf_data[vf].tx_rate; 7949 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
7950 ivi->min_tx_rate = 0;
7934 ivi->vlan = adapter->vf_data[vf].pf_vlan; 7951 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7935 ivi->qos = adapter->vf_data[vf].pf_qos; 7952 ivi->qos = adapter->vf_data[vf].pf_qos;
7936 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; 7953 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7955,11 +7972,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
7955 reg = rd32(E1000_DTXCTL); 7972 reg = rd32(E1000_DTXCTL);
7956 reg |= E1000_DTXCTL_VLAN_ADDED; 7973 reg |= E1000_DTXCTL_VLAN_ADDED;
7957 wr32(E1000_DTXCTL, reg); 7974 wr32(E1000_DTXCTL, reg);
7975 /* Fall through */
7958 case e1000_82580: 7976 case e1000_82580:
7959 /* enable replication vlan tag stripping */ 7977 /* enable replication vlan tag stripping */
7960 reg = rd32(E1000_RPLOLR); 7978 reg = rd32(E1000_RPLOLR);
7961 reg |= E1000_RPLOLR_STRVLAN; 7979 reg |= E1000_RPLOLR_STRVLAN;
7962 wr32(E1000_RPLOLR, reg); 7980 wr32(E1000_RPLOLR, reg);
7981 /* Fall through */
7963 case e1000_i350: 7982 case e1000_i350:
7964 /* none of the above registers are supported by i350 */ 7983 /* none of the above registers are supported by i350 */
7965 break; 7984 break;
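The two added comments make the cascading switch explicit: each newer MAC needs its own extra register writes plus everything the later cases do, so execution intentionally falls through from 82576 to 82580 and on to i350. Annotating it also keeps static checkers happy, and later gcc's -Wimplicit-fallthrough recognizes comments of this form. The shape of the construct, as a sketch:

switch (hw->mac.type) {
case e1000_82576:
        /* 82576-only: notify stack of double VLAN via DTXCTL */
        /* Fall through */
case e1000_82580:
        /* 82580 and newer: replication VLAN tag stripping */
        /* Fall through */
case e1000_i350:
        /* i350 supports none of the registers above */
        break;
default:
        break;
}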
@@ -8049,6 +8068,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
8049 } /* endif adapter->dmac is not disabled */ 8068 } /* endif adapter->dmac is not disabled */
8050 } else if (hw->mac.type == e1000_82580) { 8069 } else if (hw->mac.type == e1000_82580) {
8051 u32 reg = rd32(E1000_PCIEMISC); 8070 u32 reg = rd32(E1000_PCIEMISC);
8071
8052 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); 8072 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
8053 wr32(E1000_DMACR, 0); 8073 wr32(E1000_DMACR, 0);
8054 } 8074 }
@@ -8077,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8077 8097
8078 swfw_mask = E1000_SWFW_PHY0_SM; 8098 swfw_mask = E1000_SWFW_PHY0_SM;
8079 8099
8080 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) 8100 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
8081 != E1000_SUCCESS)
8082 return E1000_ERR_SWFW_SYNC; 8101 return E1000_ERR_SWFW_SYNC;
8083 8102
8084 status = i2c_smbus_read_byte_data(this_client, byte_offset); 8103 status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -8088,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8088 return E1000_ERR_I2C; 8107 return E1000_ERR_I2C;
8089 else { 8108 else {
8090 *data = status; 8109 *data = status;
8091 return E1000_SUCCESS; 8110 return 0;
8092 } 8111 }
8093} 8112}
8094 8113
@@ -8113,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8113 if (!this_client) 8132 if (!this_client)
8114 return E1000_ERR_I2C; 8133 return E1000_ERR_I2C;
8115 8134
8116 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) 8135 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
8117 return E1000_ERR_SWFW_SYNC; 8136 return E1000_ERR_SWFW_SYNC;
8118 status = i2c_smbus_write_byte_data(this_client, byte_offset, data); 8137 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
8119 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 8138 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -8121,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8121 if (status) 8140 if (status)
8122 return E1000_ERR_I2C; 8141 return E1000_ERR_I2C;
8123 else 8142 else
8124 return E1000_SUCCESS; 8143 return 0;
8125 8144
8126} 8145}
8127 8146
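Several returns in this file switch from E1000_SUCCESS to a literal 0, and the acquire_swfw_sync result is now tested for truth instead of compared against the macro. Since E1000_SUCCESS is defined as 0, the standard kernel idiom applies, as this sketch shows:

/* E1000_SUCCESS == 0, so "nonzero means failure" holds and the explicit
 * "!= E1000_SUCCESS" comparison was noise.
 */
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
        return E1000_ERR_SWFW_SYNC;

/* ... do the I2C transfer ... */

return 0;       /* success, formerly "return E1000_SUCCESS;" */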
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab25e49365f7..794c139f0cc0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -360,8 +360,8 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
360 return 0; 360 return 0;
361} 361}
362 362
363static int igb_ptp_enable(struct ptp_clock_info *ptp, 363static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
364 struct ptp_clock_request *rq, int on) 364 struct ptp_clock_request *rq, int on)
365{ 365{
366 return -EOPNOTSUPP; 366 return -EOPNOTSUPP;
367} 367}
@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
559 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 559 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
560 -EFAULT : 0; 560 -EFAULT : 0;
561} 561}
562
562/** 563/**
563 * igb_ptp_set_ts_config - control hardware time stamping 564 * igb_ptp_set_timestamp_mode - setup hardware for timestamping
564 * @netdev: 565 * @adapter: networking device structure
565 * @ifreq: 566 * @config: hwtstamp configuration
566 * 567 *
567 * Outgoing time stamping can be enabled and disabled. Play nice and 568 * Outgoing time stamping can be enabled and disabled. Play nice and
568 * disable it when requested, although it shouldn't cause any overhead 569
@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
575 * type has to be specified. Matching the kind of event packet is 576 * type has to be specified. Matching the kind of event packet is
576 * not supported, with the exception of "all V2 events regardless of 577 * not supported, with the exception of "all V2 events regardless of
577 * level 2 or 4". 578 * level 2 or 4".
578 **/ 579 */
579int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) 580static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
581 struct hwtstamp_config *config)
580{ 582{
581 struct igb_adapter *adapter = netdev_priv(netdev);
582 struct e1000_hw *hw = &adapter->hw; 583 struct e1000_hw *hw = &adapter->hw;
583 struct hwtstamp_config *config = &adapter->tstamp_config;
584 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; 584 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
585 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 585 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
586 u32 tsync_rx_cfg = 0; 586 u32 tsync_rx_cfg = 0;
@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
588 bool is_l2 = false; 588 bool is_l2 = false;
589 u32 regval; 589 u32 regval;
590 590
591 if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
592 return -EFAULT;
593
594 /* reserved for future extensions */ 591 /* reserved for future extensions */
595 if (config->flags) 592 if (config->flags)
596 return -EINVAL; 593 return -EINVAL;
@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
725 regval = rd32(E1000_RXSTMPL); 722 regval = rd32(E1000_RXSTMPL);
726 regval = rd32(E1000_RXSTMPH); 723 regval = rd32(E1000_RXSTMPH);
727 724
728 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 725 return 0;
726}
727
728/**
729 * igb_ptp_set_ts_config - set hardware time stamping config
730 * @netdev: network interface device structure
731 * @ifreq: interface request carrying the hwtstamp_config
732 *
733 **/
734int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
735{
736 struct igb_adapter *adapter = netdev_priv(netdev);
737 struct hwtstamp_config config;
738 int err;
739
740 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
741 return -EFAULT;
742
743 err = igb_ptp_set_timestamp_mode(adapter, &config);
744 if (err)
745 return err;
746
747 /* save these settings for future reference */
748 memcpy(&adapter->tstamp_config, &config,
749 sizeof(adapter->tstamp_config));
750
751 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
729 -EFAULT : 0; 752 -EFAULT : 0;
730} 753}
731 754
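The refactor above splits the SIOCSHWTSTAMP handler into a pure worker (igb_ptp_set_timestamp_mode) and a thin ioctl wrapper: copy the request in, apply it, persist it only on success, then echo the possibly-adjusted config back to userspace. Condensed shape of the wrapper (types as in the hunk; kernel context assumed):

int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct hwtstamp_config config;
        int err;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        err = igb_ptp_set_timestamp_mode(adapter, &config);
        if (err)
                return err;     /* nothing persisted on failure */

        adapter->tstamp_config = config;        /* kept for GET and reset */

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
               -EFAULT : 0;
}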
@@ -745,7 +768,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
745 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; 768 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
746 adapter->ptp_caps.gettime = igb_ptp_gettime_82576; 769 adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
747 adapter->ptp_caps.settime = igb_ptp_settime_82576; 770 adapter->ptp_caps.settime = igb_ptp_settime_82576;
748 adapter->ptp_caps.enable = igb_ptp_enable; 771 adapter->ptp_caps.enable = igb_ptp_feature_enable;
749 adapter->cc.read = igb_ptp_read_82576; 772 adapter->cc.read = igb_ptp_read_82576;
750 adapter->cc.mask = CLOCKSOURCE_MASK(64); 773 adapter->cc.mask = CLOCKSOURCE_MASK(64);
751 adapter->cc.mult = 1; 774 adapter->cc.mult = 1;
@@ -765,7 +788,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
765 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; 788 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
766 adapter->ptp_caps.gettime = igb_ptp_gettime_82576; 789 adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
767 adapter->ptp_caps.settime = igb_ptp_settime_82576; 790 adapter->ptp_caps.settime = igb_ptp_settime_82576;
768 adapter->ptp_caps.enable = igb_ptp_enable; 791 adapter->ptp_caps.enable = igb_ptp_feature_enable;
769 adapter->cc.read = igb_ptp_read_82580; 792 adapter->cc.read = igb_ptp_read_82580;
770 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); 793 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
771 adapter->cc.mult = 1; 794 adapter->cc.mult = 1;
@@ -784,7 +807,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
784 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; 807 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
785 adapter->ptp_caps.gettime = igb_ptp_gettime_i210; 808 adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
786 adapter->ptp_caps.settime = igb_ptp_settime_i210; 809 adapter->ptp_caps.settime = igb_ptp_settime_i210;
787 adapter->ptp_caps.enable = igb_ptp_enable; 810 adapter->ptp_caps.enable = igb_ptp_feature_enable;
788 /* Enable the timer functions by clearing bit 31. */ 811 /* Enable the timer functions by clearing bit 31. */
789 wr32(E1000_TSAUXC, 0x0); 812 wr32(E1000_TSAUXC, 0x0);
790 break; 813 break;
@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
820 wr32(E1000_IMS, E1000_IMS_TS); 843 wr32(E1000_IMS, E1000_IMS_TS);
821 } 844 }
822 845
846 adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
847 adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
848
823 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 849 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
824 &adapter->pdev->dev); 850 &adapter->pdev->dev);
825 if (IS_ERR(adapter->ptp_clock)) { 851 if (IS_ERR(adapter->ptp_clock)) {
@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
884 return; 910 return;
885 911
886 /* reset the tstamp_config */ 912 /* reset the tstamp_config */
887 memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config)); 913 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
888 914
889 switch (adapter->hw.mac.type) { 915 switch (adapter->hw.mac.type) {
890 case e1000_82576: 916 case e1000_82576:
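Persisting the accepted config pays off here: instead of zeroing tstamp_config, igb_ptp_reset now replays it through the worker, so a PTP daemon's timestamping mode survives an adapter reset without a fresh ioctl:

/* Re-apply the last user-accepted settings after a device reset. */
igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);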
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 90eef07943f4..2178f87e9f61 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -101,8 +101,8 @@ static int igbvf_get_settings(struct net_device *netdev,
101 else 101 else
102 ecmd->duplex = DUPLEX_HALF; 102 ecmd->duplex = DUPLEX_HALF;
103 } else { 103 } else {
104 ethtool_cmd_speed_set(ecmd, -1); 104 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
105 ecmd->duplex = -1; 105 ecmd->duplex = DUPLEX_UNKNOWN;
106 } 106 }
107 107
108 ecmd->autoneg = AUTONEG_DISABLE; 108 ecmd->autoneg = AUTONEG_DISABLE;
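The magic -1 values give way to the named constants from include/uapi/linux/ethtool.h. For the speed this is a straight substitution (SPEED_UNKNOWN is -1); for the u8 duplex field the old -1 was silently truncated to 0xff, which is exactly DUPLEX_UNKNOWN, so the named form documents what was already being stored:

ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);     /* SPEED_UNKNOWN == -1 */
ecmd->duplex = DUPLEX_UNKNOWN;                  /* 0xff; same bits -1 left in a u8 */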
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
119static void igbvf_get_pauseparam(struct net_device *netdev, 119static void igbvf_get_pauseparam(struct net_device *netdev,
120 struct ethtool_pauseparam *pause) 120 struct ethtool_pauseparam *pause)
121{ 121{
122 return;
123} 122}
124 123
125static int igbvf_set_pauseparam(struct net_device *netdev, 124static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
476 475
477void igbvf_set_ethtool_ops(struct net_device *netdev) 476void igbvf_set_ethtool_ops(struct net_device *netdev)
478{ 477{
479 SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops); 478 netdev->ethtool_ops = &igbvf_ethtool_ops;
480} 479}
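Part of a tree-wide removal this cycle: SET_ETHTOOL_OPS was only a thin macro around the assignment, so open-coding it loses nothing:

/* The removed macro, for reference:
 *   #define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
 */
netdev->ethtool_ops = &igbvf_ethtool_ops;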
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index dbb7dd2f8e36..b311e9e710d2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -107,8 +107,8 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
107 ethtool_cmd_speed_set(ecmd, SPEED_10000); 107 ethtool_cmd_speed_set(ecmd, SPEED_10000);
108 ecmd->duplex = DUPLEX_FULL; 108 ecmd->duplex = DUPLEX_FULL;
109 } else { 109 } else {
110 ethtool_cmd_speed_set(ecmd, -1); 110 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
111 ecmd->duplex = -1; 111 ecmd->duplex = DUPLEX_UNKNOWN;
112 } 112 }
113 113
114 ecmd->autoneg = AUTONEG_DISABLE; 114 ecmd->autoneg = AUTONEG_DISABLE;
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
656 656
657void ixgb_set_ethtool_ops(struct net_device *netdev) 657void ixgb_set_ethtool_ops(struct net_device *netdev)
658{ 658{
659 SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops); 659 netdev->ethtool_ops = &ixgb_ethtool_ops;
660} 660}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7d68e6..ac9f2148cdc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
155struct vf_macvlans { 155struct vf_macvlans {
156 struct list_head l; 156 struct list_head l;
157 int vf; 157 int vf;
158 int rar_entry;
159 bool free; 158 bool free;
160 bool is_macvlan; 159 bool is_macvlan;
161 u8 vf_macvlan[ETH_ALEN]; 160 u8 vf_macvlan[ETH_ALEN];
@@ -363,7 +362,7 @@ struct ixgbe_ring_container {
363 for (pos = (head).ring; pos != NULL; pos = pos->next) 362 for (pos = (head).ring; pos != NULL; pos = pos->next)
364 363
365#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ 364#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
366 ? 8 : 1) 365 ? 8 : 1)
367#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS 366#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
368 367
369/* MAX_Q_VECTORS of these are allocated, 368/* MAX_Q_VECTORS of these are allocated,
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
613#define MAX_MSIX_VECTORS_82598 18 612#define MAX_MSIX_VECTORS_82598 18
614#define MAX_Q_VECTORS_82598 16 613#define MAX_Q_VECTORS_82598 16
615 614
615struct ixgbe_mac_addr {
616 u8 addr[ETH_ALEN];
617 u16 queue;
618 u16 state; /* bitmask */
619};
620#define IXGBE_MAC_STATE_DEFAULT 0x1
621#define IXGBE_MAC_STATE_MODIFIED 0x2
622#define IXGBE_MAC_STATE_IN_USE 0x4
623
616#define MAX_Q_VECTORS MAX_Q_VECTORS_82599 624#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
617#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 625#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
618 626
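The new ixgbe_mac_addr table tracks every receive-address slot in software, with state as a bitmask so a slot can be, say, both IN_USE and MODIFIED while a write-back to hardware is pending. A hypothetical pair of helpers to show the intended bit usage (not from the patch; the driver's real add/del/sync routines are declared further down in this header):

static bool mac_slot_free(const struct ixgbe_mac_addr *entry)
{
        return !(entry->state & IXGBE_MAC_STATE_IN_USE);
}

static void mac_slot_claim(struct ixgbe_mac_addr *entry,
                           const u8 *addr, u16 queue)
{
        memcpy(entry->addr, addr, ETH_ALEN);
        entry->queue = queue;
        /* mark dirty so the next sync pushes it to a RAR register */
        entry->state |= IXGBE_MAC_STATE_MODIFIED | IXGBE_MAC_STATE_IN_USE;
}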
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
785 793
786 u32 timer_event_accumulator; 794 u32 timer_event_accumulator;
787 u32 vferr_refcount; 795 u32 vferr_refcount;
796 struct ixgbe_mac_addr *mac_table;
788 struct kobject *info_kobj; 797 struct kobject *info_kobj;
789#ifdef CONFIG_IXGBE_HWMON 798#ifdef CONFIG_IXGBE_HWMON
790 struct hwmon_buff *ixgbe_hwmon_buff; 799 struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
863int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 872int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
864int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, 873int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
865 u16 subdevice_id); 874 u16 subdevice_id);
875#ifdef CONFIG_PCI_IOV
876void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
877#endif
878int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
879 u8 *addr, u16 queue);
880int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
881 u8 *addr, u16 queue);
866void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 882void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
867netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, 883netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
868 struct ixgbe_ring *); 884 struct ixgbe_ring *);
@@ -941,6 +957,7 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
941} 957}
942 958
943void ixgbe_ptp_init(struct ixgbe_adapter *adapter); 959void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
960void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
944void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); 961void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
945void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); 962void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
946void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); 963void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8946c1..15609331ec17 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -41,10 +41,10 @@
41#define IXGBE_82598_RX_PB_SIZE 512 41#define IXGBE_82598_RX_PB_SIZE 512
42 42
43static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 43static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
44 ixgbe_link_speed speed, 44 ixgbe_link_speed speed,
45 bool autoneg_wait_to_complete); 45 bool autoneg_wait_to_complete);
46static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 46static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
47 u8 *eeprom_data); 47 u8 *eeprom_data);
48 48
49/** 49/**
50 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout 50 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -140,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
140 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 140 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
141 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 141 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
142 phy->ops.get_firmware_version = 142 phy->ops.get_firmware_version =
143 &ixgbe_get_phy_firmware_version_tnx; 143 &ixgbe_get_phy_firmware_version_tnx;
144 break; 144 break;
145 case ixgbe_phy_nl: 145 case ixgbe_phy_nl:
146 phy->ops.reset = &ixgbe_reset_phy_nl; 146 phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -156,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
156 156
157 /* Check to see if SFP+ module is supported */ 157 /* Check to see if SFP+ module is supported */
158 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, 158 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
159 &list_offset, 159 &list_offset,
160 &data_offset); 160 &data_offset);
161 if (ret_val != 0) { 161 if (ret_val != 0) {
162 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; 162 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
163 goto out; 163 goto out;
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
219 * Determines the link capabilities by reading the AUTOC register. 219 * Determines the link capabilities by reading the AUTOC register.
220 **/ 220 **/
221static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, 221static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
222 ixgbe_link_speed *speed, 222 ixgbe_link_speed *speed,
223 bool *autoneg) 223 bool *autoneg)
224{ 224{
225 s32 status = 0; 225 s32 status = 0;
226 u32 autoc = 0; 226 u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
337 int i; 337 int i;
338 bool link_up; 338 bool link_up;
339 339
340 /* 340 /* Validate the water mark configuration */
341 * Validate the water mark configuration for packet buffer 0. Zero 341 if (!hw->fc.pause_time) {
342 * water marks indicate that the packet buffer was not configured
343 * and the watermarks for packet buffer 0 should always be configured.
344 */
345 if (!hw->fc.low_water ||
346 !hw->fc.high_water[0] ||
347 !hw->fc.pause_time) {
348 hw_dbg(hw, "Invalid water mark configuration\n");
349 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 342 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
350 goto out; 343 goto out;
351 } 344 }
352 345
346 /* Low water mark of zero causes XOFF floods */
347 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
348 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
349 hw->fc.high_water[i]) {
350 if (!hw->fc.low_water[i] ||
351 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
352 hw_dbg(hw, "Invalid water mark configuration\n");
353 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
354 goto out;
355 }
356 }
357 }
358
353 /* 359 /*
354 * On 82598 having Rx FC on causes resets while doing 1G 360 * On 82598 having Rx FC on causes resets while doing 1G
355 * so if it's on turn it off once we know link_speed. For 361 * so if it's on turn it off once we know link_speed. For
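The replacement check is both narrower and wider than the old one: pause_time alone is always required, but the water marks are now validated per traffic class, and only where Tx pause is actually in play, because a zero (or inverted) low mark would make the hardware flood XOFF frames. A standalone restatement of the rule:

#include <stdbool.h>

#define MAX_TRAFFIC_CLASS 8

static bool fc_config_valid(unsigned int pause_time, bool tx_pause,
                            const unsigned int low[MAX_TRAFFIC_CLASS],
                            const unsigned int high[MAX_TRAFFIC_CLASS])
{
        int i;

        if (!pause_time)
                return false;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if (!tx_pause || !high[i])
                        continue;       /* TC not using Tx pause */
                if (!low[i] || low[i] >= high[i])
                        return false;   /* would flood XOFF */
        }
        return true;
}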
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
432 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 438 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
433 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 439 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
434 440
435 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
436
437 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 441 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
438 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 442 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
439 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 443 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
440 hw->fc.high_water[i]) { 444 hw->fc.high_water[i]) {
445 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
441 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 446 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
442 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); 447 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
443 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); 448 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
@@ -468,7 +473,7 @@ out:
468 * Restarts the link. Performs autonegotiation if needed. 473 * Restarts the link. Performs autonegotiation if needed.
469 **/ 474 **/
470static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, 475static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
471 bool autoneg_wait_to_complete) 476 bool autoneg_wait_to_complete)
472{ 477{
473 u32 autoc_reg; 478 u32 autoc_reg;
474 u32 links_reg; 479 u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
550 * Reads the links register to determine if link is up and the current speed 555 * Reads the links register to determine if link is up and the current speed
551 **/ 556 **/
552static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, 557static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
553 ixgbe_link_speed *speed, bool *link_up, 558 ixgbe_link_speed *speed, bool *link_up,
554 bool link_up_wait_to_complete) 559 bool link_up_wait_to_complete)
555{ 560{
556 u32 links_reg; 561 u32 links_reg;
557 u32 i; 562 u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
567 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); 572 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
568 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); 573 hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
569 hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, 574 hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
570 &adapt_comp_reg); 575 &adapt_comp_reg);
571 if (link_up_wait_to_complete) { 576 if (link_up_wait_to_complete) {
572 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 577 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
573 if ((link_reg & 1) && 578 if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
579 } 584 }
580 msleep(100); 585 msleep(100);
581 hw->phy.ops.read_reg(hw, 0xC79F, 586 hw->phy.ops.read_reg(hw, 0xC79F,
582 MDIO_MMD_PMAPMD, 587 MDIO_MMD_PMAPMD,
583 &link_reg); 588 &link_reg);
584 hw->phy.ops.read_reg(hw, 0xC00C, 589 hw->phy.ops.read_reg(hw, 0xC00C,
585 MDIO_MMD_PMAPMD, 590 MDIO_MMD_PMAPMD,
586 &adapt_comp_reg); 591 &adapt_comp_reg);
587 } 592 }
588 } else { 593 } else {
589 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) 594 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
656 661
657 /* Set KX4/KX support according to speed requested */ 662 /* Set KX4/KX support according to speed requested */
658 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || 663 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
659 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { 664 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
660 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; 665 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
661 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 666 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
662 autoc |= IXGBE_AUTOC_KX4_SUPP; 667 autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
689 * Sets the link speed in the AUTOC register in the MAC and restarts link. 694 * Sets the link speed in the AUTOC register in the MAC and restarts link.
690 **/ 695 **/
691static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 696static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
692 ixgbe_link_speed speed, 697 ixgbe_link_speed speed,
693 bool autoneg_wait_to_complete) 698 bool autoneg_wait_to_complete)
694{ 699{
695 s32 status; 700 s32 status;
696 701
697 /* Setup the PHY according to input speed */ 702 /* Setup the PHY according to input speed */
698 status = hw->phy.ops.setup_link_speed(hw, speed, 703 status = hw->phy.ops.setup_link_speed(hw, speed,
699 autoneg_wait_to_complete); 704 autoneg_wait_to_complete);
700 /* Set up MAC */ 705 /* Set up MAC */
701 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 706 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
702 707
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
735 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { 740 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
736 /* Enable Tx Atlas so packets can be transmitted again */ 741 /* Enable Tx Atlas so packets can be transmitted again */
737 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 742 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
738 &analog_val); 743 &analog_val);
739 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; 744 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
740 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 745 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
741 analog_val); 746 analog_val);
742 747
743 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 748 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
744 &analog_val); 749 &analog_val);
745 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 750 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
746 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 751 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
747 analog_val); 752 analog_val);
748 753
749 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 754 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
750 &analog_val); 755 &analog_val);
751 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 756 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
752 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 757 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
753 analog_val); 758 analog_val);
754 759
755 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 760 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
756 &analog_val); 761 &analog_val);
757 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 762 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
758 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 763 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
759 analog_val); 764 analog_val);
760 } 765 }
761 766
762 /* Reset PHY */ 767 /* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
955 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) 960 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
956 for (offset = 0; offset < hw->mac.vft_size; offset++) 961 for (offset = 0; offset < hw->mac.vft_size; offset++)
957 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 962 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
958 0); 963 0);
959 964
960 return 0; 965 return 0;
961} 966}
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
973 u32 atlas_ctl; 978 u32 atlas_ctl;
974 979
975 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, 980 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
976 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); 981 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
977 IXGBE_WRITE_FLUSH(hw); 982 IXGBE_WRITE_FLUSH(hw);
978 udelay(10); 983 udelay(10);
979 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 984 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1273 /* Setup Tx packet buffer sizes */ 1278 /* Setup Tx packet buffer sizes */
1274 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) 1279 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1275 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); 1280 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1276
1277 return;
1278} 1281}
1279 1282
1280static struct ixgbe_mac_operations mac_ops_82598 = { 1283static struct ixgbe_mac_operations mac_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index f32b3dd1ba8e..bc7c924240a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -48,17 +48,17 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
48 ixgbe_link_speed speed, 48 ixgbe_link_speed speed,
49 bool autoneg_wait_to_complete); 49 bool autoneg_wait_to_complete);
50static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 50static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
51 ixgbe_link_speed speed, 51 ixgbe_link_speed speed,
52 bool autoneg_wait_to_complete); 52 bool autoneg_wait_to_complete);
53static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); 53static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
54static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 54static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
55 bool autoneg_wait_to_complete); 55 bool autoneg_wait_to_complete);
56static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 56static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
57 ixgbe_link_speed speed, 57 ixgbe_link_speed speed,
58 bool autoneg_wait_to_complete); 58 bool autoneg_wait_to_complete);
59static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 59static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
60 ixgbe_link_speed speed, 60 ixgbe_link_speed speed,
61 bool autoneg_wait_to_complete); 61 bool autoneg_wait_to_complete);
62static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 62static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
63static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 63static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
64 u8 dev_addr, u8 *data); 64 u8 dev_addr, u8 *data);
@@ -96,9 +96,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
96 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && 96 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
97 !ixgbe_mng_enabled(hw)) { 97 !ixgbe_mng_enabled(hw)) {
98 mac->ops.disable_tx_laser = 98 mac->ops.disable_tx_laser =
99 &ixgbe_disable_tx_laser_multispeed_fiber; 99 &ixgbe_disable_tx_laser_multispeed_fiber;
100 mac->ops.enable_tx_laser = 100 mac->ops.enable_tx_laser =
101 &ixgbe_enable_tx_laser_multispeed_fiber; 101 &ixgbe_enable_tx_laser_multispeed_fiber;
102 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 102 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
103 } else { 103 } else {
104 mac->ops.disable_tx_laser = NULL; 104 mac->ops.disable_tx_laser = NULL;
@@ -132,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
132 hw->phy.ops.reset = NULL; 132 hw->phy.ops.reset = NULL;
133 133
134 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 134 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
135 &data_offset); 135 &data_offset);
136 if (ret_val != 0) 136 if (ret_val != 0)
137 goto setup_sfp_out; 137 goto setup_sfp_out;
138 138
139 /* PHY config will finish before releasing the semaphore */ 139 /* PHY config will finish before releasing the semaphore */
140 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 140 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
141 IXGBE_GSSR_MAC_CSR_SM); 141 IXGBE_GSSR_MAC_CSR_SM);
142 if (ret_val != 0) { 142 if (ret_val != 0) {
143 ret_val = IXGBE_ERR_SWFW_SYNC; 143 ret_val = IXGBE_ERR_SWFW_SYNC;
144 goto setup_sfp_out; 144 goto setup_sfp_out;
@@ -334,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
334 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 334 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
335 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 335 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
336 phy->ops.get_firmware_version = 336 phy->ops.get_firmware_version =
337 &ixgbe_get_phy_firmware_version_tnx; 337 &ixgbe_get_phy_firmware_version_tnx;
338 break; 338 break;
339 default: 339 default:
340 break; 340 break;
@@ -352,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
352 * Determines the link capabilities by reading the AUTOC register. 352 * Determines the link capabilities by reading the AUTOC register.
353 **/ 353 **/
354static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 354static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
355 ixgbe_link_speed *speed, 355 ixgbe_link_speed *speed,
356 bool *autoneg) 356 bool *autoneg)
357{ 357{
358 s32 status = 0; 358 s32 status = 0;
@@ -543,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
543 * Restarts the link. Performs autonegotiation if needed. 543 * Restarts the link. Performs autonegotiation if needed.
544 **/ 544 **/
545static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 545static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
546 bool autoneg_wait_to_complete) 546 bool autoneg_wait_to_complete)
547{ 547{
548 u32 autoc_reg; 548 u32 autoc_reg;
549 u32 links_reg; 549 u32 links_reg;
@@ -672,8 +672,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
672 * Set the link speed in the AUTOC register and restarts link. 672 * Set the link speed in the AUTOC register and restarts link.
673 **/ 673 **/
674static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 674static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
675 ixgbe_link_speed speed, 675 ixgbe_link_speed speed,
676 bool autoneg_wait_to_complete) 676 bool autoneg_wait_to_complete)
677{ 677{
678 s32 status = 0; 678 s32 status = 0;
679 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 679 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -820,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
820 */ 820 */
821 if (speedcnt > 1) 821 if (speedcnt > 1)
822 status = ixgbe_setup_mac_link_multispeed_fiber(hw, 822 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
823 highest_link_speed, 823 highest_link_speed,
824 autoneg_wait_to_complete); 824 autoneg_wait_to_complete);
825 825
826out: 826out:
827 /* Set autoneg_advertised value based on input link speed */ 827 /* Set autoneg_advertised value based on input link speed */
@@ -1009,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1009 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 1009 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1010 autoc |= IXGBE_AUTOC_KX_SUPP; 1010 autoc |= IXGBE_AUTOC_KX_SUPP;
1011 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 1011 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
1012 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 1012 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
1013 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 1013 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
1014 /* Switch from 1G SFI to 10G SFI if requested */ 1014 /* Switch from 1G SFI to 10G SFI if requested */
1015 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 1015 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
1016 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { 1016 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1018,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1018 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 1018 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
1019 } 1019 }
1020 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 1020 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
1021 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 1021 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
1022 /* Switch from 10G SFI to 1G SFI if requested */ 1022 /* Switch from 10G SFI to 1G SFI if requested */
1023 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 1023 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1024 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { 1024 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1051,7 +1051,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1051 } 1051 }
1052 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 1052 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1053 status = 1053 status =
1054 IXGBE_ERR_AUTONEG_NOT_COMPLETE; 1054 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1055 hw_dbg(hw, "Autoneg did not complete.\n"); 1055 hw_dbg(hw, "Autoneg did not complete.\n");
1056 } 1056 }
1057 } 1057 }
@@ -1074,14 +1074,14 @@ out:
1074 * Restarts link on PHY and MAC based on settings passed in. 1074 * Restarts link on PHY and MAC based on settings passed in.
1075 **/ 1075 **/
1076static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 1076static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1077 ixgbe_link_speed speed, 1077 ixgbe_link_speed speed,
1078 bool autoneg_wait_to_complete) 1078 bool autoneg_wait_to_complete)
1079{ 1079{
1080 s32 status; 1080 s32 status;
1081 1081
1082 /* Setup the PHY according to input speed */ 1082 /* Setup the PHY according to input speed */
1083 status = hw->phy.ops.setup_link_speed(hw, speed, 1083 status = hw->phy.ops.setup_link_speed(hw, speed,
1084 autoneg_wait_to_complete); 1084 autoneg_wait_to_complete);
1085 /* Set up MAC */ 1085 /* Set up MAC */
1086 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); 1086 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1087 1087
@@ -1224,7 +1224,7 @@ mac_reset_top:
1224 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 1224 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1225 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; 1225 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1226 autoc2 |= (hw->mac.orig_autoc2 & 1226 autoc2 |= (hw->mac.orig_autoc2 &
1227 IXGBE_AUTOC2_UPPER_MASK); 1227 IXGBE_AUTOC2_UPPER_MASK);
1228 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 1228 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1229 } 1229 }
1230 } 1230 }
@@ -1246,7 +1246,7 @@ mac_reset_top:
1246 /* Add the SAN MAC address to the RAR only if it's a valid address */ 1246 /* Add the SAN MAC address to the RAR only if it's a valid address */
1247 if (is_valid_ether_addr(hw->mac.san_addr)) { 1247 if (is_valid_ether_addr(hw->mac.san_addr)) {
1248 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 1248 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1249 hw->mac.san_addr, 0, IXGBE_RAH_AV); 1249 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1250 1250
1251 /* Save the SAN MAC RAR index */ 1251 /* Save the SAN MAC RAR index */
1252 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; 1252 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1257,7 +1257,7 @@ mac_reset_top:
1257 1257
1258 /* Store the alternative WWNN/WWPN prefix */ 1258 /* Store the alternative WWNN/WWPN prefix */
1259 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 1259 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1260 &hw->mac.wwpn_prefix); 1260 &hw->mac.wwpn_prefix);
1261 1261
1262reset_hw_out: 1262reset_hw_out:
1263 return status; 1263 return status;
@@ -1271,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1271{ 1271{
1272 int i; 1272 int i;
1273 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); 1273 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1274
1274 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; 1275 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1275 1276
1276 /* 1277 /*
@@ -1284,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1284 udelay(10); 1285 udelay(10);
1285 } 1286 }
1286 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1287 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1287 hw_dbg(hw, "Flow Director previous command isn't complete, " 1288 hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
1288 "aborting table re-initialization.\n");
1289 return IXGBE_ERR_FDIR_REINIT_FAILED; 1289 return IXGBE_ERR_FDIR_REINIT_FAILED;
1290 } 1290 }
1291 1291
@@ -1299,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1299 * - write 0 to bit 8 of FDIRCMD register 1299 * - write 0 to bit 8 of FDIRCMD register
1300 */ 1300 */
1301 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1301 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1302 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1302 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1303 IXGBE_FDIRCMD_CLEARHT)); 1303 IXGBE_FDIRCMD_CLEARHT));
1304 IXGBE_WRITE_FLUSH(hw); 1304 IXGBE_WRITE_FLUSH(hw);
1305 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1305 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1306 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1306 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1307 ~IXGBE_FDIRCMD_CLEARHT)); 1307 ~IXGBE_FDIRCMD_CLEARHT));
1308 IXGBE_WRITE_FLUSH(hw); 1308 IXGBE_WRITE_FLUSH(hw);
1309 /* 1309 /*
1310 * Clear FDIR Hash register to clear any leftover hashes 1310 * Clear FDIR Hash register to clear any leftover hashes
@@ -1319,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1319 /* Poll init-done after we write FDIRCTRL register */ 1319 /* Poll init-done after we write FDIRCTRL register */
1320 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1320 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1321 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1321 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1322 IXGBE_FDIRCTRL_INIT_DONE) 1322 IXGBE_FDIRCTRL_INIT_DONE)
1323 break; 1323 break;
1324 usleep_range(1000, 2000); 1324 usleep_range(1000, 2000);
1325 } 1325 }
@@ -1368,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1368 IXGBE_WRITE_FLUSH(hw); 1368 IXGBE_WRITE_FLUSH(hw);
1369 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1369 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1370 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1370 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1371 IXGBE_FDIRCTRL_INIT_DONE) 1371 IXGBE_FDIRCTRL_INIT_DONE)
1372 break; 1372 break;
1373 usleep_range(1000, 2000); 1373 usleep_range(1000, 2000);
1374 } 1374 }
@@ -1453,7 +1453,7 @@ do { \
1453 bucket_hash ^= hi_hash_dword >> n; \ 1453 bucket_hash ^= hi_hash_dword >> n; \
1454 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1454 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1455 sig_hash ^= hi_hash_dword << (16 - n); \ 1455 sig_hash ^= hi_hash_dword << (16 - n); \
1456} while (0); 1456} while (0)
1457 1457
1458/** 1458/**
1459 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1459 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
@@ -1529,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1529 * @queue: queue index to direct traffic to 1529 * @queue: queue index to direct traffic to
1530 **/ 1530 **/
1531s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1531s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1532 union ixgbe_atr_hash_dword input, 1532 union ixgbe_atr_hash_dword input,
1533 union ixgbe_atr_hash_dword common, 1533 union ixgbe_atr_hash_dword common,
1534 u8 queue) 1534 u8 queue)
1535{ 1535{
1536 u64 fdirhashcmd; 1536 u64 fdirhashcmd;
1537 u32 fdircmd; 1537 u32 fdircmd;
@@ -1555,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1555 1555
1556 /* configure FDIRCMD register */ 1556 /* configure FDIRCMD register */
1557 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1557 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1558 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1558 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1559 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1559 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1560 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1560 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1561 1561
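Dropping the stray semicolon after while (0) in the two hash macros is not cosmetic: the whole point of wrapping a multi-statement macro in do { } while (0) is that the caller supplies the terminating semicolon, so the macro behaves like a single statement inside if/else. With the semicolon baked in, an else after the macro fails to parse. A compilable demonstration:

#include <stdio.h>

#define GOOD() do { puts("hash step"); } while (0)   /* caller adds ';' */
/* #define BAD() do { puts("hash step"); } while (0); -- the extra ';'
 * would end the if-statement below early and orphan the else.
 */

int main(void)
{
        int have_key = 1;

        if (have_key)
                GOOD();
        else
                puts("skip");

        return 0;
}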
@@ -1579,7 +1579,7 @@ do { \
1579 bucket_hash ^= lo_hash_dword >> n; \ 1579 bucket_hash ^= lo_hash_dword >> n; \
1580 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1580 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1581 bucket_hash ^= hi_hash_dword >> n; \ 1581 bucket_hash ^= hi_hash_dword >> n; \
1582} while (0); 1582} while (0)
1583 1583
1584/** 1584/**
1585 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash 1585 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1651,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1651static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) 1651static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1652{ 1652{
1653 u32 mask = ntohs(input_mask->formatted.dst_port); 1653 u32 mask = ntohs(input_mask->formatted.dst_port);
1654
1654 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; 1655 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1655 mask |= ntohs(input_mask->formatted.src_port); 1656 mask |= ntohs(input_mask->formatted.src_port);
1656 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); 1657 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1885,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1885 u32 core_ctl; 1886 u32 core_ctl;
1886 1887
1887 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | 1888 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1888 (reg << 8)); 1889 (reg << 8));
1889 IXGBE_WRITE_FLUSH(hw); 1890 IXGBE_WRITE_FLUSH(hw);
1890 udelay(10); 1891 udelay(10);
1891 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); 1892 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7b100d..4e5385a2a465 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -41,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
41static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 41static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
42static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 42static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
43static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 43static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
44 u16 count); 44 u16 count);
45static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 45static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
46static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 47static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -271,6 +271,7 @@ out:
271 **/ 271 **/
272s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) 272s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
273{ 273{
274 s32 ret_val;
274 u32 ctrl_ext; 275 u32 ctrl_ext;
275 276
276 /* Set the media type */ 277 /* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
292 IXGBE_WRITE_FLUSH(hw); 293 IXGBE_WRITE_FLUSH(hw);
293 294
294 /* Setup flow control */ 295 /* Setup flow control */
295 ixgbe_setup_fc(hw); 296 ret_val = ixgbe_setup_fc(hw);
297 if (ret_val)
298 goto out;
296 299
297 /* Clear adapter stopped flag */ 300 /* Clear adapter stopped flag */
298 hw->adapter_stopped = false; 301 hw->adapter_stopped = false;
299 302
300 return 0; 303out:
304 return ret_val;
301} 305}
302 306
303/** 307/**
@@ -481,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
481 * Reads the part number string from the EEPROM. 485 * Reads the part number string from the EEPROM.
482 **/ 486 **/
483s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 487s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
484 u32 pba_num_size) 488 u32 pba_num_size)
485{ 489{
486 s32 ret_val; 490 s32 ret_val;
487 u16 data; 491 u16 data;
@@ -814,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
814 eeprom->address_bits = 16; 818 eeprom->address_bits = 16;
815 else 819 else
816 eeprom->address_bits = 8; 820 eeprom->address_bits = 8;
817 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " 821 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
818 "%d\n", eeprom->type, eeprom->word_size, 822 eeprom->type, eeprom->word_size, eeprom->address_bits);
819 eeprom->address_bits);
820 } 823 }
821 824
822 return 0; 825 return 0;
@@ -1388,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1388 } 1391 }
1389 1392
1390 if (i == timeout) { 1393 if (i == timeout) {
1391 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " 1394 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
1392 "not granted.\n");
1393 /* 1395 /*
1394 * this release is particularly important because our attempts 1396 * this release is particularly important because our attempts
1395 * above to get the semaphore may have succeeded, and if there 1397 * above to get the semaphore may have succeeded, and if there
@@ -1434,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1434 * was not granted because we don't have access to the EEPROM 1436 * was not granted because we don't have access to the EEPROM
1435 */ 1437 */
1436 if (i >= timeout) { 1438 if (i >= timeout) {
1437 hw_dbg(hw, "SWESMBI Software EEPROM semaphore " 1439 hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
1438 "not granted.\n");
1439 ixgbe_release_eeprom_semaphore(hw); 1440 ixgbe_release_eeprom_semaphore(hw);
1440 status = IXGBE_ERR_EEPROM; 1441 status = IXGBE_ERR_EEPROM;
1441 } 1442 }
1442 } else { 1443 } else {
1443 hw_dbg(hw, "Software semaphore SMBI between device drivers " 1444 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
1444 "not granted.\n");
1445 } 1445 }
1446 1446
1447 return status; 1447 return status;
@@ -1483,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
 	 */
 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
 					    IXGBE_EEPROM_OPCODE_BITS);
 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
 			break;
@@ -1532,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
  * @count: number of bits to shift out
  **/
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
 					u16 count)
 {
 	u32 eec;
 	u32 mask;
@@ -1736,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
  * caller does not need checksum_val, the value can be NULL.
  **/
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
 					   u16 *checksum_val)
 {
 	s32 status;
 	u16 checksum;
@@ -1809,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
  * Puts an ethernet address into a receive address register.
  **/
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 			  u32 enable_addr)
 {
 	u32 rar_low, rar_high;
 	u32 rar_entries = hw->mac.num_rar_entries;
@@ -2053,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
 
 	if (hw->addr_ctrl.mta_in_use > 0)
 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
 
 	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
 	return 0;
@@ -2071,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 
 	if (a->mta_in_use > 0)
 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
 				hw->mac.mc_filter_type);
 
 	return 0;
 }
@@ -2106,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 	u32 fcrtl, fcrth;
 	int i;
 
-	/*
-	 * Validate the water mark configuration for packet buffer 0. Zero
-	 * water marks indicate that the packet buffer was not configured
-	 * and the watermarks for packet buffer 0 should always be configured.
-	 */
-	if (!hw->fc.low_water ||
-	    !hw->fc.high_water[0] ||
-	    !hw->fc.pause_time) {
-		hw_dbg(hw, "Invalid water mark configuration\n");
+	/* Validate the water mark configuration. */
+	if (!hw->fc.pause_time) {
 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 		goto out;
 	}
 
+	/* Low water mark of zero causes XOFF floods */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				hw_dbg(hw, "Invalid water mark configuration\n");
+				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+				goto out;
+			}
+		}
+	}
+
 	/* Negotiate the fc mode to use */
 	ixgbe_fc_autoneg(hw);
 
@@ -2181,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
 		    hw->fc.high_water[i]) {
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
 		} else {
@@ -2654,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
 
 	/* For informational purposes only */
 	if (i >= IXGBE_MAX_SECRX_POLL)
-		hw_dbg(hw, "Rx unit being enabled before security "
-		       "path fully disabled. Continuing with init.\n");
+		hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
 
 	return 0;
 
@@ -2782,7 +2786,7 @@ out:
  * get and set mac_addr routines.
  **/
 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
 					 u16 *san_mac_offset)
 {
 	s32 ret_val;
 
@@ -2828,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
 	hw->mac.ops.set_lan_id(hw);
 	/* apply the port offset to the address offset */
 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
 	for (i = 0; i < 3; i++) {
 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
 					      &san_mac_data);
@@ -3068,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
  * Turn on/off specified VLAN in the VLAN filter table.
  **/
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 			   bool vlan_on)
 {
 	s32 regindex;
 	u32 bitindex;
@@ -3190,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 			 * Ignore it. */
 			vfta_changed = false;
 		}
-	}
-	else
+	} else {
 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+	}
 	}
 
 	if (vfta_changed)
@@ -3292,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  * block to check the support for the alternative WWNN/WWPN prefix support.
  **/
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 				 u16 *wwpn_prefix)
 {
 	u16 offset, caps;
 	u16 alt_san_mac_blk_offset;
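The ixgbe_fc_enable_generic() hunks above replace the single hw->fc.low_water value with a per-traffic-class array and reject any class whose low water mark is zero or not strictly below its high mark, since a zero low mark floods the link with XOFF pause frames. A condensed sketch of just that validation rule (struct and macro names here are illustrative, not the driver's):

#include <stdbool.h>

#define MAX_TC 8

struct fc_cfg {
	unsigned int pause_time;
	bool tx_pause;				/* Tx flow control requested */
	unsigned int low_water[MAX_TC];		/* per-TC thresholds */
	unsigned int high_water[MAX_TC];
};

/* Mirrors the check added above: every TC with Tx pause and a
 * configured high mark needs 0 < low_water < high_water. */
static bool fc_watermarks_valid(const struct fc_cfg *fc)
{
	if (!fc->pause_time)
		return false;
	for (int i = 0; i < MAX_TC; i++) {
		if (fc->tx_pause && fc->high_water[i] &&
		    (!fc->low_water[i] ||
		     fc->low_water[i] >= fc->high_water[i]))
			return false;
	}
	return true;
}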
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40fb5537..2ae5d4b8fc93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -39,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
 				  u32 pba_num_size);
 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
 enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
 enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -61,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
 				    u16 words, u16 *data);
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 				       u16 *data);
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 					      u16 words, u16 *data);
 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
 					   u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
 
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 			  u32 enable_addr);
 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -92,13 +92,13 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
 			   u32 vind, bool vlan_on);
 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
 				 ixgbe_link_speed *speed,
 				 bool *link_up, bool link_up_wait_to_complete);
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 				 u16 *wwpn_prefix);
 
 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
 	return unlikely(!addr);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
 static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
 {
 	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
 }
 #define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
 
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-	u32 value;
-
-	if (ixgbe_removed(reg_addr))
-		return IXGBE_FAILED_READ_REG;
-	value = readl(reg_addr + reg);
-	if (unlikely(value == IXGBE_FAILED_READ_REG))
-		ixgbe_check_remove(hw, reg);
-	return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
 #define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
 
 #define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
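The header change above turns ixgbe_read_reg() from a static inline into an out-of-line function (its body moves to ixgbe_main.c, in a hunk further down) so the surprise-removal helper no longer needs to be exported. The underlying pattern: a hot-removed PCI device reads back as all ones, so a 0xFFFFFFFF result is a cue to re-verify the device is still present before trusting any value. A self-contained toy model of that pattern (not driver code; the driver's real version appears in the ixgbe_main.c hunk below):

#include <stdint.h>
#include <stdio.h>

#define FAILED_READ 0xFFFFFFFFu

static uint32_t *regs;	/* toy register file; NULL models a removed device */

static uint32_t safe_read_reg(unsigned int reg)
{
	uint32_t value;

	if (!regs)			/* removal already detected */
		return FAILED_READ;
	value = regs[reg];
	if (value == FAILED_READ)	/* all ones: device may be gone */
		regs = NULL;		/* stands in for the removal recheck */
	return value;
}

int main(void)
{
	static uint32_t space[3] = { 0x10, FAILED_READ, 0x30 };

	regs = space;
	printf("reg0 = %#x\n", safe_read_reg(0)); /* normal read */
	printf("reg1 = %#x\n", safe_read_reg(1)); /* looks removed, rechecks */
	printf("reg2 = %#x\n", safe_read_reg(2)); /* short-circuits afterwards */
	return 0;
}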
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index e055e000131b..a689ee0d4bed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
 			struct ixgbe_dcb_config *dcb_config)
 {
 	s32 ret = 0;
 	u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
 	for (i = 0; i < MAX_USER_PRIORITY; i++)
 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
-	return;
 }
 
 void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cbc..d3ba63f9ad37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 			continue;
 		}
 
+		fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 		reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3b0f30..3b932fe64ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 			max_tc = prio_tc[i];
 	}
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 
 		if (enabled) {
 			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
 		} else {
 			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d5a1e3db0774..90c370230e20 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -31,17 +31,17 @@
 
 /* DCB register definitions */
 #define IXGBE_RTTDCS_TDPAC	0x00000001 /* 0 Round Robin,
 					    * 1 WSP - Weighted Strict Priority
 					    */
 #define IXGBE_RTTDCS_VMPAC	0x00000002 /* 0 Round Robin,
 					    * 1 WRR - Weighted Round Robin
 					    */
 #define IXGBE_RTTDCS_TDRM	0x00000010 /* Transmit Recycle Mode */
 #define IXGBE_RTTDCS_ARBDIS	0x00000040 /* DCB arbiter disable */
 #define IXGBE_RTTDCS_BDPM	0x00400000 /* Bypass Data Pipe - must clear! */
 #define IXGBE_RTTDCS_BPBFSM	0x00800000 /* Bypass PB Free Space - must
 					    * clear!
 					    */
 #define IXGBE_RTTDCS_SPEED_CHG	0x80000000 /* Link speed change */
 
 /* Receive UP2TC mapping */
@@ -56,11 +56,11 @@
 #define IXGBE_RTRPT4C_LSP	0x80000000 /* LSP enable bit */
 
 #define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
 					    * buffers enable
 					    */
 #define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
 					    * (RSS) enable
 					    */
 
 /* RTRPCS Bit Masks */
 #define IXGBE_RTRPCS_RRM	0x00000002 /* Receive Recycle Mode enable */
@@ -81,8 +81,8 @@
 
 /* RTTPCS Bit Masks */
 #define IXGBE_RTTPCS_TPPAC	0x00000020 /* 0 Round Robin,
 					    * 1 SP - Strict Priority
 					    */
 #define IXGBE_RTTPCS_ARBDIS	0x00000040 /* Arbiter disable */
 #define IXGBE_RTTPCS_TPRM	0x00000100 /* Transmit Recycle Mode enable */
 #define IXGBE_RTTPCS_ARBD_SHIFT	22
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index edd89a1ef27f..5172b6b12c09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 					 u8 prio, u8 bwg_id, u8 bw_pct,
 					 u8 up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 					  u8 bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 					 u8 prio, u8 bwg_id, u8 bw_pct,
 					 u8 up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 					  u8 bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 					 u8 *prio, u8 *bwg_id, u8 *bw_pct,
 					 u8 *up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 					  u8 *bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 					 u8 *prio, u8 *bwg_id, u8 *bw_pct,
 					 u8 *up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 					  u8 *bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 				    u8 setting)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 				    u8 *setting)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 472b0f450bf9..5e2c1e35e517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -253,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
  **/
 void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
 {
-	if (adapter->ixgbe_dbg_adapter)
-		debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+	debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
 	adapter->ixgbe_dbg_adapter = NULL;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6c55c14d082a..a452730a3278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -141,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 		   sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
 		  / sizeof(u64))
 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
 			 IXGBE_PB_STATS_LEN + \
 			 IXGBE_QUEUE_STATS_LEN)
 
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)", "Eeprom test    (offline)",
@@ -152,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
 
 static int ixgbe_get_settings(struct net_device *netdev,
 			      struct ethtool_cmd *ecmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -161,13 +161,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
 	bool autoneg = false;
 	bool link_up;
 
-	/* SFP type is needed for get_link_capabilities */
-	if (hw->phy.media_type & (ixgbe_media_type_fiber |
-				  ixgbe_media_type_fiber_qsfp)) {
-		if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-			hw->phy.ops.identify_sfp(hw);
-	}
-
 	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
 
 	/* set the supported link speeds */
@@ -303,15 +296,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
 		}
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	return 0;
 }
 
 static int ixgbe_set_settings(struct net_device *netdev,
 			      struct ethtool_cmd *ecmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -368,7 +361,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
 }
 
 static void ixgbe_get_pauseparam(struct net_device *netdev,
 				 struct ethtool_pauseparam *pause)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -390,7 +383,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
 }
 
 static int ixgbe_set_pauseparam(struct net_device *netdev,
 				struct ethtool_pauseparam *pause)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -450,7 +443,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
 
 static void ixgbe_get_regs(struct net_device *netdev,
 			   struct ethtool_regs *regs, void *p)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -812,7 +805,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
 }
 
 static int ixgbe_get_eeprom(struct net_device *netdev,
 			    struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -918,7 +911,7 @@ err:
 }
 
 static void ixgbe_get_drvinfo(struct net_device *netdev,
 			      struct ethtool_drvinfo *drvinfo)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	u32 nvm_track_id;
@@ -940,7 +933,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
 				struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -953,7 +946,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
 }
 
 static int ixgbe_set_ringparam(struct net_device *netdev,
 			       struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *temp_ring;
@@ -1082,7 +1075,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
 }
 
 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct rtnl_link_stats64 temp;
@@ -1110,7 +1103,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		}
 
 		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < netdev->num_tx_queues; j++) {
 		ring = adapter->tx_ring[j];
@@ -1180,7 +1173,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			      u8 *data)
 {
 	char *p = (char *)data;
 	int i;
@@ -1357,8 +1350,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
 		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
 		val = ixgbe_read_reg(&adapter->hw, reg);
 		if (val != (test_pattern[pat] & write & mask)) {
-			e_err(drv, "pattern test reg %04X failed: got "
-			      "0x%08X expected 0x%08X\n",
+			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
 			      reg, val, (test_pattern[pat] & write & mask));
 			*data = reg;
 			ixgbe_write_reg(&adapter->hw, reg, before);
@@ -1382,8 +1374,8 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
 	ixgbe_write_reg(&adapter->hw, reg, write & mask);
 	val = ixgbe_read_reg(&adapter->hw, reg);
 	if ((write & mask) != (val & mask)) {
-		e_err(drv, "set/check reg %04X test failed: got 0x%08X "
-		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
+		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+		      reg, (val & mask), (write & mask));
 		*data = reg;
 		ixgbe_write_reg(&adapter->hw, reg, before);
 		return true;
@@ -1430,8 +1422,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
 	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
 	if (value != after) {
-		e_err(drv, "failed STATUS register test got: 0x%08X "
-		      "expected: 0x%08X\n", after, value);
+		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+		      after, value);
 		*data = 1;
 		return 1;
 	}
@@ -1533,10 +1525,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 			return -1;
 		}
 	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
 				netdev->name, netdev)) {
 		shared_int = false;
 	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
 			       netdev->name, netdev)) {
 		*data = 1;
 		return -1;
 	}
@@ -1563,9 +1555,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 		 */
 		adapter->test_icr = 0;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
 				~mask & 0x00007FFF);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
 				~mask & 0x00007FFF);
 		IXGBE_WRITE_FLUSH(&adapter->hw);
 		usleep_range(10000, 20000);
 
@@ -1587,7 +1579,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 		IXGBE_WRITE_FLUSH(&adapter->hw);
 		usleep_range(10000, 20000);
 
-		if (!(adapter->test_icr &mask)) {
+		if (!(adapter->test_icr & mask)) {
 			*data = 4;
 			break;
 		}
@@ -1602,9 +1594,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 		 */
 		adapter->test_icr = 0;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
 				~mask & 0x00007FFF);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
 				~mask & 0x00007FFF);
 		IXGBE_WRITE_FLUSH(&adapter->hw);
 		usleep_range(10000, 20000);
 
@@ -1964,7 +1956,7 @@ out:
 }
 
 static void ixgbe_diag_test(struct net_device *netdev,
 			    struct ethtool_test *eth_test, u64 *data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	bool if_running = netif_running(netdev);
@@ -1987,10 +1979,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		int i;
 		for (i = 0; i < adapter->num_vfs; i++) {
 			if (adapter->vfinfo[i].clear_to_send) {
-				netdev_warn(netdev, "%s",
-					    "offline diagnostic is not "
-					    "supported when VFs are "
-					    "present\n");
+				netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
 				data[0] = 1;
 				data[1] = 1;
 				data[2] = 1;
@@ -2037,8 +2026,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		 * loopback diagnostic. */
 		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
 				      IXGBE_FLAG_VMDQ_ENABLED)) {
-			e_info(hw, "Skip MAC loopback diagnostic in VT "
-			       "mode\n");
+			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
 			data[3] = 0;
 			goto skip_loopback;
 		}
@@ -2078,7 +2066,7 @@ skip_ol_tests:
 }
 
 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
 			       struct ethtool_wolinfo *wol)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int retval = 0;
@@ -2094,12 +2082,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_get_wol(struct net_device *netdev,
 			  struct ethtool_wolinfo *wol)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	wol->supported = WAKE_UCAST | WAKE_MCAST |
 			 WAKE_BCAST | WAKE_MAGIC;
 	wol->wolopts = 0;
 
 	if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2181,7 +2169,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
 }
 
 static int ixgbe_get_coalesce(struct net_device *netdev,
 			      struct ethtool_coalesce *ec)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -2222,8 +2210,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
 	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
 		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
 			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-			e_info(probe, "rx-usecs value high enough "
-			       "to re-enable RSC\n");
+			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
 			return true;
 		}
 	/* if interrupt rate is too high then disable RSC */
@@ -2236,7 +2223,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
 }
 
 static int ixgbe_set_coalesce(struct net_device *netdev,
 			      struct ethtool_coalesce *ec)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_q_vector *q_vector;
@@ -2421,9 +2408,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
 	switch (cmd->flow_type) {
 	case TCP_V4_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case UDP_V4_FLOW:
 		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case SCTP_V4_FLOW:
 	case AH_ESP_V4_FLOW:
 	case AH_V4_FLOW:
@@ -2433,9 +2422,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
 		break;
 	case TCP_V6_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case UDP_V6_FLOW:
 		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fallthrough */
 	case SCTP_V6_FLOW:
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
@@ -2787,8 +2778,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
 
 	if ((flags2 & UDP_RSS_FLAGS) &&
 	    !(adapter->flags2 & UDP_RSS_FLAGS))
-		e_warn(drv, "enabling UDP RSS: fragmented packets"
-		       " may arrive out of order to the stack above\n");
+		e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
 
 	adapter->flags2 = flags2;
 
@@ -3099,5 +3089,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+	netdev->ethtool_ops = &ixgbe_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc786750d..0772b7730fce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
 	void *extra_ddp_buffer;
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
 	u8 up;
-#endif
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2067d392cc3d..2d9451e39686 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1113,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 		err = pci_enable_msi(adapter->pdev);
 		if (err) {
 			netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
-				     "Unable to allocate MSI interrupt, "
-				     "falling back to legacy. Error: %d\n", err);
+				     "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+				     err);
 			return;
 		}
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c047c3ef8d71..f5aa3311ea28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
 	ixgbe_service_event_schedule(adapter);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 {
 	u32 value;
 
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 	ixgbe_remove_adapter(hw);
 }
 
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns : value read or IXGBE_FAILED_READ_REG if removed
+ *
+ * This function is used to read device registers. It checks for device
+ * removal by confirming any read that returns all ones by checking the
+ * status register value for all ones. This function avoids reading from
+ * the hardware if a removal was previously detected in which case it
+ * returns IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u32 value;
+
+	if (ixgbe_removed(reg_addr))
+		return IXGBE_FAILED_READ_REG;
+	value = readl(reg_addr + reg);
+	if (unlikely(value == IXGBE_FAILED_READ_REG))
+		ixgbe_check_remove(hw, reg);
+	return value;
+}
+
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
 {
 	u16 value;
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
 }
 
 /**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 vlnctrl;
-
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 vlnctrl;
-
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlnctrl |= IXGBE_VLNCTRL_VFE;
-	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
  * @adapter: driver data
  */
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *          0 on no addresses written
+ *          X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (!netif_running(netdev))
+		return 0;
+
+	if (hw->mac.ops.update_mc_addr_list)
+		hw->mac.ops.update_mc_addr_list(hw, netdev);
+	else
+		return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+	ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+	return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+			hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+					    adapter->mac_table[i].queue,
+					    IXGBE_RAH_AV);
+		else
+			hw->mac.ops.clear_rar(hw, i);
+
+		adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+	}
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+			if (adapter->mac_table[i].state &
+			    IXGBE_MAC_STATE_IN_USE)
+				hw->mac.ops.set_rar(hw, i,
+						    adapter->mac_table[i].addr,
+						    adapter->mac_table[i].queue,
+						    IXGBE_RAH_AV);
+			else
+				hw->mac.ops.clear_rar(hw, i);
+
+			adapter->mac_table[i].state &=
+						~(IXGBE_MAC_STATE_MODIFIED);
+		}
+	}
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+		adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+		adapter->mac_table[i].queue = 0;
+	}
+	ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, count = 0;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state == 0)
+			count++;
+	}
+	return count;
+}
+
+/* this function destroys the first RAR entry */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+					 u8 *addr)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+	adapter->mac_table[0].queue = VMDQ_P(0);
+	adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+				       IXGBE_MAC_STATE_IN_USE);
+	hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+			    adapter->mac_table[0].queue,
+			    IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+			continue;
+		adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+						IXGBE_MAC_STATE_IN_USE);
+		ether_addr_copy(adapter->mac_table[i].addr, addr);
+		adapter->mac_table[i].queue = queue;
+		ixgbe_sync_mac_table(adapter);
+		return i;
+	}
+	return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+	/* search table for addr, if found, set to 0 and sync */
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+		    adapter->mac_table[i].queue == queue) {
+			adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+			adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+			adapter->mac_table[i].queue = 0;
+			ixgbe_sync_mac_table(adapter);
+			return 0;
+		}
+	}
+	return -ENOMEM;
+}
+/**
  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  * @netdev: network interface device structure
  *
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3858 * 0 on no addresses written 4007 * 0 on no addresses written
3859 * X on writing X addresses to the RAR table 4008 * X on writing X addresses to the RAR table
3860 **/ 4009 **/
3861static int ixgbe_write_uc_addr_list(struct net_device *netdev) 4010static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
3862{ 4011{
3863 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4012 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3864 struct ixgbe_hw *hw = &adapter->hw;
3865 unsigned int rar_entries = hw->mac.num_rar_entries - 1;
3866 int count = 0; 4013 int count = 0;
3867 4014
3868 /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
3869 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3870 rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3871
3872 /* return ENOMEM indicating insufficient memory for addresses */ 4015 /* return ENOMEM indicating insufficient memory for addresses */
3873 if (netdev_uc_count(netdev) > rar_entries) 4016 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
3874 return -ENOMEM; 4017 return -ENOMEM;
3875 4018
3876 if (!netdev_uc_empty(netdev)) { 4019 if (!netdev_uc_empty(netdev)) {
3877 struct netdev_hw_addr *ha; 4020 struct netdev_hw_addr *ha;
3878 /* return error if we do not support writing to RAR table */
3879 if (!hw->mac.ops.set_rar)
3880 return -ENOMEM;
3881
3882 netdev_for_each_uc_addr(ha, netdev) { 4021 netdev_for_each_uc_addr(ha, netdev) {
3883 if (!rar_entries) 4022 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
3884 break; 4023 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
3885 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3886 VMDQ_P(0), IXGBE_RAH_AV);
3887 count++; 4024 count++;
3888 } 4025 }
3889 } 4026 }
3890 /* write the addresses in reverse order to avoid write combining */
3891 for (; rar_entries > 0 ; rar_entries--)
3892 hw->mac.ops.clear_rar(hw, rar_entries);
3893
3894 return count; 4027 return count;
3895} 4028}
3896 4029
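With the table helpers in place, the rewritten ixgbe_write_uc_addr_list() stops programming RAR registers directly. The del-then-add pair inside the loop makes the refresh idempotent: the delete is a harmless no-op the first time an address is seen, and on later calls it releases the stale slot before a fresh one is claimed. In the simplified model from above:

    struct uc_addr {
        uint8_t addr[6];
        struct uc_addr *next;
    };

    /* returns the number of addresses written, or -ENOMEM */
    static int write_uc_addr_list(struct mac_entry *tbl, int n,
                                  const struct uc_addr *list, uint16_t pool)
    {
        int count = 0;

        for (const struct uc_addr *ha = list; ha; ha = ha->next) {
            del_mac_filter(tbl, n, ha->addr, pool); /* drop any stale copy */
            if (add_mac_filter(tbl, n, ha->addr, pool) < 0)
                return -ENOMEM;
            count++;
        }
        return count;
    }

(The driver checks ixgbe_available_rars() up front instead of failing mid-loop, which avoids partially written lists.)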
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3908 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4041 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3909 struct ixgbe_hw *hw = &adapter->hw; 4042 struct ixgbe_hw *hw = &adapter->hw;
3910 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; 4043 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4044 u32 vlnctrl;
3911 int count; 4045 int count;
3912 4046
3913 /* Check for Promiscuous and All Multicast modes */ 4047 /* Check for Promiscuous and All Multicast modes */
3914
3915 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4048 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4049 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3916 4050
3917 /* set all bits that we expect to always be set */ 4051 /* set all bits that we expect to always be set */
3918 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ 4052 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3922 4056
3923 /* clear the bits we are changing the status of */ 4057 /* clear the bits we are changing the status of */
3924 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4058 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3925 4059 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3926 if (netdev->flags & IFF_PROMISC) { 4060 if (netdev->flags & IFF_PROMISC) {
3927 hw->addr_ctrl.user_set_promisc = true; 4061 hw->addr_ctrl.user_set_promisc = true;
3928 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4062 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3929 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); 4063 vmolr |= IXGBE_VMOLR_MPE;
3930 /* Only disable hardware filter vlans in promiscuous mode 4064 /* Only disable hardware filter vlans in promiscuous mode
3931 * if SR-IOV and VMDQ are disabled - otherwise ensure 4065 * if SR-IOV and VMDQ are disabled - otherwise ensure
3932 * that hardware VLAN filters remain enabled. 4066 * that hardware VLAN filters remain enabled.
3933 */ 4067 */
3934 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | 4068 if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3935 IXGBE_FLAG_SRIOV_ENABLED))) 4069 IXGBE_FLAG_SRIOV_ENABLED))
3936 ixgbe_vlan_filter_disable(adapter); 4070 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3937 else
3938 ixgbe_vlan_filter_enable(adapter);
3939 } else { 4071 } else {
3940 if (netdev->flags & IFF_ALLMULTI) { 4072 if (netdev->flags & IFF_ALLMULTI) {
3941 fctrl |= IXGBE_FCTRL_MPE; 4073 fctrl |= IXGBE_FCTRL_MPE;
3942 vmolr |= IXGBE_VMOLR_MPE; 4074 vmolr |= IXGBE_VMOLR_MPE;
3943 } 4075 }
3944 ixgbe_vlan_filter_enable(adapter); 4076 vlnctrl |= IXGBE_VLNCTRL_VFE;
3945 hw->addr_ctrl.user_set_promisc = false; 4077 hw->addr_ctrl.user_set_promisc = false;
3946 } 4078 }
3947 4079
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3950 * sufficient space to store all the addresses then enable 4082 * sufficient space to store all the addresses then enable
3951 * unicast promiscuous mode 4083 * unicast promiscuous mode
3952 */ 4084 */
3953 count = ixgbe_write_uc_addr_list(netdev); 4085 count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
3954 if (count < 0) { 4086 if (count < 0) {
3955 fctrl |= IXGBE_FCTRL_UPE; 4087 fctrl |= IXGBE_FCTRL_UPE;
3956 vmolr |= IXGBE_VMOLR_ROPE; 4088 vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3960 * then we should just turn on promiscuous mode so 4092 * then we should just turn on promiscuous mode so
3961 * that we can at least receive multicast traffic 4093 * that we can at least receive multicast traffic
3962 */ 4094 */
3963 hw->mac.ops.update_mc_addr_list(hw, netdev); 4095 count = ixgbe_write_mc_addr_list(netdev);
3964 vmolr |= IXGBE_VMOLR_ROMPE; 4096 if (count < 0) {
3965 4097 fctrl |= IXGBE_FCTRL_MPE;
3966 if (adapter->num_vfs) 4098 vmolr |= IXGBE_VMOLR_MPE;
3967 ixgbe_restore_vf_multicasts(adapter); 4099 } else if (count) {
4100 vmolr |= IXGBE_VMOLR_ROMPE;
4101 }
3968 4102
3969 if (hw->mac.type != ixgbe_mac_82598EB) { 4103 if (hw->mac.type != ixgbe_mac_82598EB) {
3970 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & 4104 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3985 /* NOTE: VLAN filtering is disabled by setting PROMISC */ 4119 /* NOTE: VLAN filtering is disabled by setting PROMISC */
3986 } 4120 }
3987 4121
4122 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3988 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4123 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3989 4124
3990 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 4125 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
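The rx-mode hunks above also change how VLAN filtering is toggled: instead of calling enable/disable helpers that each performed their own read-modify-write of VLNCTRL, the function now reads the register once, clears the VFE/CFIEN bits it owns, re-sets them according to policy, and writes the result back in one place. A compact model of that single read-modify-write discipline (register accessors and bit positions here are illustrative stand-ins only):

    #include <stdint.h>
    #include <stdbool.h>

    #define VLNCTRL_VFE   (1u << 30) /* illustrative bit layout */
    #define VLNCTRL_CFIEN (1u << 29)

    uint32_t vlnctrl_read(void);        /* stand-ins for IXGBE_READ_REG/   */
    void vlnctrl_write(uint32_t val);   /* IXGBE_WRITE_REG on IXGBE_VLNCTRL */

    static void apply_vlan_policy(bool promisc, bool virt_enabled)
    {
        uint32_t vlnctrl = vlnctrl_read();

        /* clear the bits this path owns, then rebuild them */
        vlnctrl &= ~(VLNCTRL_VFE | VLNCTRL_CFIEN);
        if (!promisc)
            vlnctrl |= VLNCTRL_VFE;
        else if (virt_enabled)
            vlnctrl |= VLNCTRL_VFE | VLNCTRL_CFIEN;

        vlnctrl_write(vlnctrl); /* one write, no transient half-states */
    }

This matches the comment in the hunk: VLAN filters are dropped in promiscuous mode only when neither SR-IOV nor VMDq is active.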
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4101 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && 4236 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4102 (pb == ixgbe_fcoe_get_tc(adapter))) 4237 (pb == ixgbe_fcoe_get_tc(adapter)))
4103 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; 4238 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4104
4105#endif 4239#endif
4240
4106 /* Calculate delay value for device */ 4241 /* Calculate delay value for device */
4107 switch (hw->mac.type) { 4242 switch (hw->mac.type) {
4108 case ixgbe_mac_X540: 4243 case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4143 * @adapter: board private structure to calculate for 4278 * @adapter: board private structure to calculate for
4144 * @pb: packet buffer to calculate 4279 * @pb: packet buffer to calculate
4145 */ 4280 */
4146static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) 4281static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4147{ 4282{
4148 struct ixgbe_hw *hw = &adapter->hw; 4283 struct ixgbe_hw *hw = &adapter->hw;
4149 struct net_device *dev = adapter->netdev; 4284 struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
4153 /* Calculate max LAN frame size */ 4288 /* Calculate max LAN frame size */
4154 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; 4289 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4155 4290
4291#ifdef IXGBE_FCOE
4292 /* FCoE traffic class uses FCOE jumbo frames */
4293 if ((dev->features & NETIF_F_FCOE_MTU) &&
4294 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4295 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
4296 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4297#endif
4298
4156 /* Calculate delay value for device */ 4299 /* Calculate delay value for device */
4157 switch (hw->mac.type) { 4300 switch (hw->mac.type) {
4158 case ixgbe_mac_X540: 4301 case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4179 if (!num_tc) 4322 if (!num_tc)
4180 num_tc = 1; 4323 num_tc = 1;
4181 4324
4182 hw->fc.low_water = ixgbe_lpbthresh(adapter);
4183
4184 for (i = 0; i < num_tc; i++) { 4325 for (i = 0; i < num_tc; i++) {
4185 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); 4326 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4327 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
4186 4328
4187 /* Low water marks must not be larger than high water marks */ 4329 /* Low water marks must not be larger than high water marks */
4188 if (hw->fc.low_water > hw->fc.high_water[i]) 4330 if (hw->fc.low_water[i] > hw->fc.high_water[i])
4189 hw->fc.low_water = 0; 4331 hw->fc.low_water[i] = 0;
4190 } 4332 }
4333
4334 for (; i < MAX_TRAFFIC_CLASS; i++)
4335 hw->fc.high_water[i] = 0;
4191} 4336}
4192 4337
4193static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) 4338static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
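Making the low water mark per traffic class brings it in line with what high_water already was, and the trailing loop zeroes the marks for traffic classes beyond the active count so values from an earlier DCB configuration cannot linger. A standalone sketch of the same bookkeeping (simplified types; calc_high_water()/calc_low_water() are hypothetical stand-ins for ixgbe_hpbthresh()/ixgbe_lpbthresh()):

    #define MAX_TC 8

    struct fc_watermarks {
        int high[MAX_TC];
        int low[MAX_TC];
    };

    int calc_high_water(int tc); /* stand-in for ixgbe_hpbthresh() */
    int calc_low_water(int tc);  /* stand-in for ixgbe_lpbthresh() */

    static void setup_watermarks(struct fc_watermarks *fc, int num_tc)
    {
        int i;

        if (!num_tc)
            num_tc = 1;

        for (i = 0; i < num_tc; i++) {
            fc->high[i] = calc_high_water(i);
            fc->low[i]  = calc_low_water(i);
            /* a low mark above the high mark would break flow control */
            if (fc->low[i] > fc->high[i])
                fc->low[i] = 0;
        }
        /* clear marks for TCs that are no longer configured */
        for (; i < MAX_TC; i++)
            fc->high[i] = 0;
    }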
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4249 vmolr |= IXGBE_VMOLR_ROMPE; 4394 vmolr |= IXGBE_VMOLR_ROMPE;
4250 hw->mac.ops.update_mc_addr_list(hw, dev); 4395 hw->mac.ops.update_mc_addr_list(hw, dev);
4251 } 4396 }
4252 ixgbe_write_uc_addr_list(adapter->netdev); 4397 ixgbe_write_uc_addr_list(adapter->netdev, pool);
4253 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 4398 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4254} 4399}
4255 4400
4256static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4257 u8 *addr, u16 pool)
4258{
4259 struct ixgbe_hw *hw = &adapter->hw;
4260 unsigned int entry;
4261
4262 entry = hw->mac.num_rar_entries - pool;
4263 hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
4264}
4265
4266static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) 4401static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4267{ 4402{
4268 struct ixgbe_adapter *adapter = vadapter->real_adapter; 4403 struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4521,6 +4656,8 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4521 case ixgbe_phy_qsfp_active_unknown: 4656 case ixgbe_phy_qsfp_active_unknown:
4522 case ixgbe_phy_qsfp_intel: 4657 case ixgbe_phy_qsfp_intel:
4523 case ixgbe_phy_qsfp_unknown: 4658 case ixgbe_phy_qsfp_unknown:
4659 /* ixgbe_phy_none is set when no SFP module is present */
4660 case ixgbe_phy_none:
4524 return true; 4661 return true;
4525 case ixgbe_phy_nl: 4662 case ixgbe_phy_nl:
4526 if (hw->mac.type == ixgbe_mac_82598EB) 4663 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4742,7 +4879,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
4742void ixgbe_reset(struct ixgbe_adapter *adapter) 4879void ixgbe_reset(struct ixgbe_adapter *adapter)
4743{ 4880{
4744 struct ixgbe_hw *hw = &adapter->hw; 4881 struct ixgbe_hw *hw = &adapter->hw;
4882 struct net_device *netdev = adapter->netdev;
4745 int err; 4883 int err;
4884 u8 old_addr[ETH_ALEN];
4746 4885
4747 if (ixgbe_removed(hw->hw_addr)) 4886 if (ixgbe_removed(hw->hw_addr))
4748 return; 4887 return;
@@ -4778,9 +4917,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4778 } 4917 }
4779 4918
4780 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 4919 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4781 4920 /* do not flush user set addresses */
4782 /* reprogram the RAR[0] in case user changed it. */ 4921 memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
4783 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); 4922 ixgbe_flush_sw_mac_table(adapter);
4923 ixgbe_mac_set_default_filter(adapter, old_addr);
4784 4924
4785 /* update SAN MAC vmdq pool selection */ 4925 /* update SAN MAC vmdq pool selection */
4786 if (hw->mac.san_mac_rar_index) 4926 if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5166,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5026#endif /* CONFIG_IXGBE_DCB */ 5166#endif /* CONFIG_IXGBE_DCB */
5027#endif /* IXGBE_FCOE */ 5167#endif /* IXGBE_FCOE */
5028 5168
5169 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5170 hw->mac.num_rar_entries,
5171 GFP_ATOMIC);
5172
5029 /* Set MAC specific capability flags and exceptions */ 5173 /* Set MAC specific capability flags and exceptions */
5030 switch (hw->mac.type) { 5174 switch (hw->mac.type) {
5031 case ixgbe_mac_82598EB: 5175 case ixgbe_mac_82598EB:
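The software MAC table is now sized at probe time from num_rar_entries rather than being a fixed array. The hunk as shown does not check the kzalloc() result, so a defensive variant of the same allocation (userspace calloc() as a stand-in, reusing the mac_entry model sketched earlier) would be:

    #include <stdlib.h>

    /* zeroed allocation: every entry starts neither IN_USE nor MODIFIED */
    static struct mac_entry *alloc_mac_table(unsigned int num_rar_entries)
    {
        /* caller must treat NULL as -ENOMEM and fail the probe */
        return calloc(num_rar_entries, sizeof(struct mac_entry));
    }

The matching kfree() calls added to the probe error path and to ixgbe_remove() further down keep the lifecycle balanced.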
@@ -5517,6 +5661,17 @@ err_setup_tx:
5517 return err; 5661 return err;
5518} 5662}
5519 5663
5664static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
5665{
5666 ixgbe_ptp_suspend(adapter);
5667
5668 ixgbe_down(adapter);
5669 ixgbe_free_irq(adapter);
5670
5671 ixgbe_free_all_tx_resources(adapter);
5672 ixgbe_free_all_rx_resources(adapter);
5673}
5674
5520/** 5675/**
5521 * ixgbe_close - Disables a network interface 5676 * ixgbe_close - Disables a network interface
5522 * @netdev: network interface device structure 5677 * @netdev: network interface device structure
@@ -5534,14 +5689,10 @@ static int ixgbe_close(struct net_device *netdev)
5534 5689
5535 ixgbe_ptp_stop(adapter); 5690 ixgbe_ptp_stop(adapter);
5536 5691
5537 ixgbe_down(adapter); 5692 ixgbe_close_suspend(adapter);
5538 ixgbe_free_irq(adapter);
5539 5693
5540 ixgbe_fdir_filter_exit(adapter); 5694 ixgbe_fdir_filter_exit(adapter);
5541 5695
5542 ixgbe_free_all_tx_resources(adapter);
5543 ixgbe_free_all_rx_resources(adapter);
5544
5545 ixgbe_release_hw_control(adapter); 5696 ixgbe_release_hw_control(adapter);
5546 5697
5547 return 0; 5698 return 0;
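Factoring the teardown into ixgbe_close_suspend() means ixgbe_close() and the __ixgbe_shutdown() hunk that follows now run the identical sequence, with the PTP suspend/stop split (see the ixgbe_ptp.c hunks below) deciding whether the clock device survives. A heavily condensed model of how the two callers share it (hypothetical names, not the driver's API):

    struct dev;                      /* opaque device handle */
    void ptp_suspend(struct dev *d); /* safe to call twice (state-bit guarded) */
    void ptp_stop(struct dev *d);
    void bring_down(struct dev *d);
    void free_irqs(struct dev *d);
    void free_rings(struct dev *d);

    static void close_suspend(struct dev *d)
    {
        ptp_suspend(d); /* park PTP but keep the clock device */
        bring_down(d);
        free_irqs(d);
        free_rings(d);
    }

    static void dev_close(struct dev *d)
    {
        ptp_stop(d);      /* full stop: also unregisters the clock */
        close_suspend(d); /* inner ptp_suspend() is now a no-op */
    }

    static void dev_suspend(struct dev *d)
    {
        close_suspend(d); /* clock survives for resume */
    }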
@@ -5608,12 +5759,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5608 netif_device_detach(netdev); 5759 netif_device_detach(netdev);
5609 5760
5610 rtnl_lock(); 5761 rtnl_lock();
5611 if (netif_running(netdev)) { 5762 if (netif_running(netdev))
5612 ixgbe_down(adapter); 5763 ixgbe_close_suspend(adapter);
5613 ixgbe_free_irq(adapter);
5614 ixgbe_free_all_tx_resources(adapter);
5615 ixgbe_free_all_rx_resources(adapter);
5616 }
5617 rtnl_unlock(); 5764 rtnl_unlock();
5618 5765
5619 ixgbe_clear_interrupt_scheme(adapter); 5766 ixgbe_clear_interrupt_scheme(adapter);
@@ -5945,7 +6092,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5945 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 6092 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5946 for (i = 0; i < adapter->num_tx_queues; i++) 6093 for (i = 0; i < adapter->num_tx_queues; i++)
5947 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 6094 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5948 &(adapter->tx_ring[i]->state)); 6095 &(adapter->tx_ring[i]->state));
5949 /* re-enable flow director interrupts */ 6096 /* re-enable flow director interrupts */
5950 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 6097 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5951 } else { 6098 } else {
@@ -7172,16 +7319,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
7172 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7319 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7173 struct ixgbe_hw *hw = &adapter->hw; 7320 struct ixgbe_hw *hw = &adapter->hw;
7174 struct sockaddr *addr = p; 7321 struct sockaddr *addr = p;
7322 int ret;
7175 7323
7176 if (!is_valid_ether_addr(addr->sa_data)) 7324 if (!is_valid_ether_addr(addr->sa_data))
7177 return -EADDRNOTAVAIL; 7325 return -EADDRNOTAVAIL;
7178 7326
7327 ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7179 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 7328 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7180 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 7329 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7181 7330
7182 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); 7331 ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7183 7332 return ret > 0 ? 0 : ret;
7184 return 0;
7185} 7333}
7186 7334
7187static int 7335static int
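The new return mapping in ixgbe_set_mac() is easy to misread: ixgbe_add_mac_filter() returns the claimed RAR index (>= 0) on success, while an ndo_set_mac_address handler must return 0. The expression `ret > 0 ? 0 : ret` folds every positive index to 0 and passes index 0 and negative errnos through unchanged; an equivalent, perhaps plainer spelling of the same mapping:

    /* map "slot index or -errno" onto the 0-or-negative ndo convention */
    static int filter_ret_to_errno(int ret)
    {
        return ret < 0 ? ret : 0;
    }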
@@ -7783,7 +7931,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7783 .ndo_do_ioctl = ixgbe_ioctl, 7931 .ndo_do_ioctl = ixgbe_ioctl,
7784 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, 7932 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
7785 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, 7933 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
7786 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, 7934 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
7787 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, 7935 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
7788 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 7936 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
7789 .ndo_get_stats64 = ixgbe_get_stats64, 7937 .ndo_get_stats64 = ixgbe_get_stats64,
@@ -8187,6 +8335,8 @@ skip_sriov:
8187 goto err_sw_init; 8335 goto err_sw_init;
8188 } 8336 }
8189 8337
8338 ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
8339
8190 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8340 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8191 (unsigned long) adapter); 8341 (unsigned long) adapter);
8192 8342
@@ -8242,7 +8392,7 @@ skip_sriov:
8242 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 8392 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
8243 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", 8393 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
8244 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 8394 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
8245 part_str); 8395 part_str);
8246 else 8396 else
8247 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", 8397 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
8248 hw->mac.type, hw->phy.type, part_str); 8398 hw->mac.type, hw->phy.type, part_str);
@@ -8304,8 +8454,8 @@ skip_sriov:
8304 8454
8305 ixgbe_dbg_adapter_init(adapter); 8455 ixgbe_dbg_adapter_init(adapter);
8306 8456
8307 /* Need link setup for MNG FW, else wait for IXGBE_UP */ 8457 /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
8308 if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link) 8458 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
8309 hw->mac.ops.setup_link(hw, 8459 hw->mac.ops.setup_link(hw,
8310 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, 8460 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
8311 true); 8461 true);
@@ -8319,6 +8469,7 @@ err_sw_init:
8319 ixgbe_disable_sriov(adapter); 8469 ixgbe_disable_sriov(adapter);
8320 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; 8470 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
8321 iounmap(adapter->io_addr); 8471 iounmap(adapter->io_addr);
8472 kfree(adapter->mac_table);
8322err_ioremap: 8473err_ioremap:
8323 free_netdev(netdev); 8474 free_netdev(netdev);
8324err_alloc_etherdev: 8475err_alloc_etherdev:
@@ -8392,6 +8543,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
8392 8543
8393 e_dev_info("complete\n"); 8544 e_dev_info("complete\n");
8394 8545
8546 kfree(adapter->mac_table);
8395 free_netdev(netdev); 8547 free_netdev(netdev);
8396 8548
8397 pci_disable_pcie_error_reporting(pdev); 8549 pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index f5c6af2b891b..1918e0abf734 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -223,7 +223,7 @@ out:
223 * received an ack to that message within delay * timeout period 223 * received an ack to that message within delay * timeout period
224 **/ 224 **/
225static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, 225static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
226 u16 mbx_id) 226 u16 mbx_id)
227{ 227{
228 struct ixgbe_mbx_info *mbx = &hw->mbx; 228 struct ixgbe_mbx_info *mbx = &hw->mbx;
229 s32 ret_val = IXGBE_ERR_MBX; 229 s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
269 u32 vf_bit = vf_number % 16; 269 u32 vf_bit = vf_number % 16;
270 270
271 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, 271 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
272 index)) { 272 index)) {
273 ret_val = 0; 273 ret_val = 0;
274 hw->mbx.stats.reqs++; 274 hw->mbx.stats.reqs++;
275 } 275 }
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
291 u32 vf_bit = vf_number % 16; 291 u32 vf_bit = vf_number % 16;
292 292
293 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, 293 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
294 index)) { 294 index)) {
295 ret_val = 0; 295 ret_val = 0;
296 hw->mbx.stats.acks++; 296 hw->mbx.stats.acks++;
297 } 297 }
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
366 * returns SUCCESS if it successfully copied message into the buffer 366 * returns SUCCESS if it successfully copied message into the buffer
367 **/ 367 **/
368static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 368static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
369 u16 vf_number) 369 u16 vf_number)
370{ 370{
371 s32 ret_val; 371 s32 ret_val;
372 u16 i; 372 u16 i;
@@ -407,7 +407,7 @@ out_no_write:
407 * a message due to a VF request so no polling for message is needed. 407 * a message due to a VF request so no polling for message is needed.
408 **/ 408 **/
409static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 409static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
410 u16 vf_number) 410 u16 vf_number)
411{ 411{
412 s32 ret_val; 412 s32 ret_val;
413 u16 i; 413 u16 i;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a9b9ad69ed0e..a5cb755de3a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -54,11 +54,11 @@
54 * Message ACK's are the value or'd with 0xF0000000 54 * Message ACK's are the value or'd with 0xF0000000
55 */ 55 */
56#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 56#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
57 * this are the ACK */ 57 * this are the ACK */
58#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 58#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
59 * this are the NACK */ 59 * this are the NACK */
60#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 60#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
61 clear to send requests */ 61 clear to send requests */
62#define IXGBE_VT_MSGINFO_SHIFT 16 62#define IXGBE_VT_MSGINFO_SHIFT 16
63/* bits 23:16 are used for extra info for certain messages */ 63/* bits 23:16 are used for extra info for certain messages */
64#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) 64#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index a76af8e28a04..ff68b7a9deff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -67,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
67 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { 67 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
68 ixgbe_get_phy_id(hw); 68 ixgbe_get_phy_id(hw);
69 hw->phy.type = 69 hw->phy.type =
70 ixgbe_get_phy_type_from_id(hw->phy.id); 70 ixgbe_get_phy_type_from_id(hw->phy.id);
71 71
72 if (hw->phy.type == ixgbe_phy_unknown) { 72 if (hw->phy.type == ixgbe_phy_unknown) {
73 hw->phy.ops.read_reg(hw, 73 hw->phy.ops.read_reg(hw,
@@ -136,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
136 u16 phy_id_low = 0; 136 u16 phy_id_low = 0;
137 137
138 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, 138 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
139 &phy_id_high); 139 &phy_id_high);
140 140
141 if (status == 0) { 141 if (status == 0) {
142 hw->phy.id = (u32)(phy_id_high << 16); 142 hw->phy.id = (u32)(phy_id_high << 16);
143 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, 143 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
144 &phy_id_low); 144 &phy_id_low);
145 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 145 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
146 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 146 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
147 } 147 }
@@ -318,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
318 * @phy_data: Pointer to read data from PHY register 318 * @phy_data: Pointer to read data from PHY register
319 **/ 319 **/
320s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 320s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
321 u32 device_type, u16 *phy_data) 321 u32 device_type, u16 *phy_data)
322{ 322{
323 s32 status; 323 s32 status;
324 u16 gssr; 324 u16 gssr;
@@ -421,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
421 * @phy_data: Data to write to the PHY register 421 * @phy_data: Data to write to the PHY register
422 **/ 422 **/
423s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 423s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
424 u32 device_type, u16 phy_data) 424 u32 device_type, u16 phy_data)
425{ 425{
426 s32 status; 426 s32 status;
427 u16 gssr; 427 u16 gssr;
@@ -548,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
548 * @speed: new link speed 548 * @speed: new link speed
549 **/ 549 **/
550s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 550s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
551 ixgbe_link_speed speed, 551 ixgbe_link_speed speed,
552 bool autoneg_wait_to_complete) 552 bool autoneg_wait_to_complete)
553{ 553{
554 554
555 /* 555 /*
@@ -582,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
582 * Determines the link capabilities by reading the AUTOC register. 582 * Determines the link capabilities by reading the AUTOC register.
583 */ 583 */
584s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 584s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
585 ixgbe_link_speed *speed, 585 ixgbe_link_speed *speed,
586 bool *autoneg) 586 bool *autoneg)
587{ 587{
588 s32 status = IXGBE_ERR_LINK_SETUP; 588 s32 status = IXGBE_ERR_LINK_SETUP;
589 u16 speed_ability; 589 u16 speed_ability;
@@ -592,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
592 *autoneg = true; 592 *autoneg = true;
593 593
594 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, 594 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
595 &speed_ability); 595 &speed_ability);
596 596
597 if (status == 0) { 597 if (status == 0) {
598 if (speed_ability & MDIO_SPEED_10G) 598 if (speed_ability & MDIO_SPEED_10G)
@@ -806,11 +806,11 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
806 806
807 /* reset the PHY and poll for completion */ 807 /* reset the PHY and poll for completion */
808 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 808 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
809 (phy_data | MDIO_CTRL1_RESET)); 809 (phy_data | MDIO_CTRL1_RESET));
810 810
811 for (i = 0; i < 100; i++) { 811 for (i = 0; i < 100; i++) {
812 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 812 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
813 &phy_data); 813 &phy_data);
814 if ((phy_data & MDIO_CTRL1_RESET) == 0) 814 if ((phy_data & MDIO_CTRL1_RESET) == 0)
815 break; 815 break;
816 usleep_range(10000, 20000); 816 usleep_range(10000, 20000);
@@ -824,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
824 824
825 /* Get init offsets */ 825 /* Get init offsets */
826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
827 &data_offset); 827 &data_offset);
828 if (ret_val != 0) 828 if (ret_val != 0)
829 goto out; 829 goto out;
830 830
@@ -838,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
838 if (ret_val) 838 if (ret_val)
839 goto err_eeprom; 839 goto err_eeprom;
840 control = (eword & IXGBE_CONTROL_MASK_NL) >> 840 control = (eword & IXGBE_CONTROL_MASK_NL) >>
841 IXGBE_CONTROL_SHIFT_NL; 841 IXGBE_CONTROL_SHIFT_NL;
842 edata = eword & IXGBE_DATA_MASK_NL; 842 edata = eword & IXGBE_DATA_MASK_NL;
843 switch (control) { 843 switch (control) {
844 case IXGBE_DELAY_NL: 844 case IXGBE_DELAY_NL:
@@ -859,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
859 if (ret_val) 859 if (ret_val)
860 goto err_eeprom; 860 goto err_eeprom;
861 hw->phy.ops.write_reg(hw, phy_offset, 861 hw->phy.ops.write_reg(hw, phy_offset,
862 MDIO_MMD_PMAPMD, eword); 862 MDIO_MMD_PMAPMD, eword);
863 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, 863 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
864 phy_offset); 864 phy_offset);
865 data_offset++; 865 data_offset++;
@@ -1010,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { 1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
1011 if (hw->bus.lan_id == 0) 1011 if (hw->bus.lan_id == 0)
1012 hw->phy.sfp_type = 1012 hw->phy.sfp_type =
1013 ixgbe_sfp_type_da_cu_core0; 1013 ixgbe_sfp_type_da_cu_core0;
1014 else 1014 else
1015 hw->phy.sfp_type = 1015 hw->phy.sfp_type =
1016 ixgbe_sfp_type_da_cu_core1; 1016 ixgbe_sfp_type_da_cu_core1;
1017 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { 1017 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1018 hw->phy.ops.read_i2c_eeprom( 1018 hw->phy.ops.read_i2c_eeprom(
1019 hw, IXGBE_SFF_CABLE_SPEC_COMP, 1019 hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1035,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1035 IXGBE_SFF_10GBASELR_CAPABLE)) { 1035 IXGBE_SFF_10GBASELR_CAPABLE)) {
1036 if (hw->bus.lan_id == 0) 1036 if (hw->bus.lan_id == 0)
1037 hw->phy.sfp_type = 1037 hw->phy.sfp_type =
1038 ixgbe_sfp_type_srlr_core0; 1038 ixgbe_sfp_type_srlr_core0;
1039 else 1039 else
1040 hw->phy.sfp_type = 1040 hw->phy.sfp_type =
1041 ixgbe_sfp_type_srlr_core1; 1041 ixgbe_sfp_type_srlr_core1;
1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { 1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1043 if (hw->bus.lan_id == 0) 1043 if (hw->bus.lan_id == 0)
1044 hw->phy.sfp_type = 1044 hw->phy.sfp_type =
@@ -1087,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1087 goto err_read_i2c_eeprom; 1087 goto err_read_i2c_eeprom;
1088 1088
1089 status = hw->phy.ops.read_i2c_eeprom(hw, 1089 status = hw->phy.ops.read_i2c_eeprom(hw,
1090 IXGBE_SFF_VENDOR_OUI_BYTE1, 1090 IXGBE_SFF_VENDOR_OUI_BYTE1,
1091 &oui_bytes[1]); 1091 &oui_bytes[1]);
1092 1092
1093 if (status != 0) 1093 if (status != 0)
1094 goto err_read_i2c_eeprom; 1094 goto err_read_i2c_eeprom;
1095 1095
1096 status = hw->phy.ops.read_i2c_eeprom(hw, 1096 status = hw->phy.ops.read_i2c_eeprom(hw,
1097 IXGBE_SFF_VENDOR_OUI_BYTE2, 1097 IXGBE_SFF_VENDOR_OUI_BYTE2,
1098 &oui_bytes[2]); 1098 &oui_bytes[2]);
1099 1099
1100 if (status != 0) 1100 if (status != 0)
1101 goto err_read_i2c_eeprom; 1101 goto err_read_i2c_eeprom;
@@ -1403,8 +1403,8 @@ err_read_i2c_eeprom:
1403 * so it returns the offsets to the phy init sequence block. 1403 * so it returns the offsets to the phy init sequence block.
1404 **/ 1404 **/
1405s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 1405s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1406 u16 *list_offset, 1406 u16 *list_offset,
1407 u16 *data_offset) 1407 u16 *data_offset)
1408{ 1408{
1409 u16 sfp_id; 1409 u16 sfp_id;
1410 u16 sfp_type = hw->phy.sfp_type; 1410 u16 sfp_type = hw->phy.sfp_type;
@@ -1493,11 +1493,11 @@ err_phy:
1493 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1493 * Performs byte read operation to SFP module's EEPROM over I2C interface.
1494 **/ 1494 **/
1495s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1495s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1496 u8 *eeprom_data) 1496 u8 *eeprom_data)
1497{ 1497{
1498 return hw->phy.ops.read_i2c_byte(hw, byte_offset, 1498 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1499 IXGBE_I2C_EEPROM_DEV_ADDR, 1499 IXGBE_I2C_EEPROM_DEV_ADDR,
1500 eeprom_data); 1500 eeprom_data);
1501} 1501}
1502 1502
1503/** 1503/**
@@ -1525,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1525 * Performs byte write operation to SFP module's EEPROM over I2C interface. 1525 * Performs byte write operation to SFP module's EEPROM over I2C interface.
1526 **/ 1526 **/
1527s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1527s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1528 u8 eeprom_data) 1528 u8 eeprom_data)
1529{ 1529{
1530 return hw->phy.ops.write_i2c_byte(hw, byte_offset, 1530 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1531 IXGBE_I2C_EEPROM_DEV_ADDR, 1531 IXGBE_I2C_EEPROM_DEV_ADDR,
1532 eeprom_data); 1532 eeprom_data);
1533} 1533}
1534 1534
1535/** 1535/**
@@ -1542,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1542 * a specified device address. 1542 * a specified device address.
1543 **/ 1543 **/
1544s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1544s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1545 u8 dev_addr, u8 *data) 1545 u8 dev_addr, u8 *data)
1546{ 1546{
1547 s32 status = 0; 1547 s32 status = 0;
1548 u32 max_retry = 10; 1548 u32 max_retry = 10;
@@ -1631,7 +1631,7 @@ read_byte_out:
1631 * a specified device address. 1631 * a specified device address.
1632 **/ 1632 **/
1633s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1633s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1634 u8 dev_addr, u8 data) 1634 u8 dev_addr, u8 data)
1635{ 1635{
1636 s32 status = 0; 1636 s32 status = 0;
1637 u32 max_retry = 1; 1637 u32 max_retry = 1;
@@ -2046,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2046 2046
2047 /* Check that the LASI temp alarm status was triggered */ 2047 /* Check that the LASI temp alarm status was triggered */
2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, 2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2049 MDIO_MMD_PMAPMD, &phy_data); 2049 MDIO_MMD_PMAPMD, &phy_data);
2050 2050
2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) 2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2052 goto out; 2052 goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 0bb047f751c2..54071ed17e3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -114,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
114s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 114s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
115s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); 115s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
116s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 116s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
117 u32 device_type, u16 *phy_data); 117 u32 device_type, u16 *phy_data);
118s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 118s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
119 u32 device_type, u16 phy_data); 119 u32 device_type, u16 phy_data);
120s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 120s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
121 u32 device_type, u16 *phy_data); 121 u32 device_type, u16 *phy_data);
122s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 122s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
123 u32 device_type, u16 phy_data); 123 u32 device_type, u16 phy_data);
124s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); 124s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
125s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 125s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
126 ixgbe_link_speed speed, 126 ixgbe_link_speed speed,
127 bool autoneg_wait_to_complete); 127 bool autoneg_wait_to_complete);
128s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 128s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
129 ixgbe_link_speed *speed, 129 ixgbe_link_speed *speed,
130 bool *autoneg); 130 bool *autoneg);
131bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); 131bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
132 132
133/* PHY specific */ 133/* PHY specific */
134s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 134s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
135 ixgbe_link_speed *speed, 135 ixgbe_link_speed *speed,
136 bool *link_up); 136 bool *link_up);
137s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); 137s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
138s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 138s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
139 u16 *firmware_version); 139 u16 *firmware_version);
140s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, 140s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
141 u16 *firmware_version); 141 u16 *firmware_version);
142 142
143s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 143s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
144s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); 144s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
145s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 145s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
146s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 146s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
147 u16 *list_offset, 147 u16 *list_offset,
148 u16 *data_offset); 148 u16 *data_offset);
149s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); 149s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
150s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 150s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
151 u8 dev_addr, u8 *data); 151 u8 dev_addr, u8 *data);
152s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 152s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
153 u8 dev_addr, u8 data); 153 u8 dev_addr, u8 data);
154s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 154s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
155 u8 *eeprom_data); 155 u8 *eeprom_data);
156s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, 156s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
157 u8 *sff8472_data); 157 u8 *sff8472_data);
158s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 158s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
159 u8 eeprom_data); 159 u8 eeprom_data);
160#endif /* _IXGBE_PHY_H_ */ 160#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 8902ae683457..68f87ecb8a76 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,7 +26,6 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28#include "ixgbe.h" 28#include "ixgbe.h"
29#include <linux/export.h>
30#include <linux/ptp_classify.h> 29#include <linux/ptp_classify.h>
31 30
32/* 31/*
@@ -334,7 +333,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
334} 333}
335 334
336/** 335/**
337 * ixgbe_ptp_enable 336 * ixgbe_ptp_feature_enable
338 * @ptp: the ptp clock structure 337 * @ptp: the ptp clock structure
339 * @rq: the requested feature to change 338 * @rq: the requested feature to change
340 * @on: whether to enable or disable the feature 339 * @on: whether to enable or disable the feature
@@ -342,8 +341,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
342 * enable (or disable) ancillary features of the phc subsystem. 341 * enable (or disable) ancillary features of the phc subsystem.
343 * our driver only supports the PPS feature on the X540 342 * our driver only supports the PPS feature on the X540
344 */ 343 */
345static int ixgbe_ptp_enable(struct ptp_clock_info *ptp, 344static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
346 struct ptp_clock_request *rq, int on) 345 struct ptp_clock_request *rq, int on)
347{ 346{
348 struct ixgbe_adapter *adapter = 347 struct ixgbe_adapter *adapter =
349 container_of(ptp, struct ixgbe_adapter, ptp_caps); 348 container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -570,9 +569,9 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
570} 569}
571 570
572/** 571/**
573 * ixgbe_ptp_set_ts_config - control hardware time stamping 572 * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
574 * @adapter: pointer to adapter struct 573 * @adapter: the private ixgbe adapter structure
575 * @ifreq: ioctl data 574 * @config: the hwtstamp configuration requested
576 * 575 *
577 * Outgoing time stamping can be enabled and disabled. Play nice and 576 * Outgoing time stamping can be enabled and disabled. Play nice and
578 * disable it when requested, although it shouldn't cause any overhead 577 * disable it when requested, although it shouldn't cause any overhead
@@ -590,25 +589,25 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
590 * packets, regardless of the type specified in the register, only use V2 589 * packets, regardless of the type specified in the register, only use V2
591 * Event mode. This more accurately tells the user what the hardware is going 590 * Event mode. This more accurately tells the user what the hardware is going
592 * to do anyways. 591 * to do anyways.
592 *
593 * Note: this may modify the hwtstamp configuration towards a more general
594 * mode, if required to support the specifically requested mode.
593 */ 595 */
594int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) 596static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
597 struct hwtstamp_config *config)
595{ 598{
596 struct ixgbe_hw *hw = &adapter->hw; 599 struct ixgbe_hw *hw = &adapter->hw;
597 struct hwtstamp_config config;
598 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; 600 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
599 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; 601 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
600 u32 tsync_rx_mtrl = PTP_EV_PORT << 16; 602 u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
601 bool is_l2 = false; 603 bool is_l2 = false;
602 u32 regval; 604 u32 regval;
603 605
604 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
605 return -EFAULT;
606
607 /* reserved for future extensions */ 606 /* reserved for future extensions */
608 if (config.flags) 607 if (config->flags)
609 return -EINVAL; 608 return -EINVAL;
610 609
611 switch (config.tx_type) { 610 switch (config->tx_type) {
612 case HWTSTAMP_TX_OFF: 611 case HWTSTAMP_TX_OFF:
613 tsync_tx_ctl = 0; 612 tsync_tx_ctl = 0;
614 case HWTSTAMP_TX_ON: 613 case HWTSTAMP_TX_ON:
@@ -617,7 +616,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
617 return -ERANGE; 616 return -ERANGE;
618 } 617 }
619 618
620 switch (config.rx_filter) { 619 switch (config->rx_filter) {
621 case HWTSTAMP_FILTER_NONE: 620 case HWTSTAMP_FILTER_NONE:
622 tsync_rx_ctl = 0; 621 tsync_rx_ctl = 0;
623 tsync_rx_mtrl = 0; 622 tsync_rx_mtrl = 0;
@@ -641,7 +640,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
641 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 640 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
642 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; 641 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
643 is_l2 = true; 642 is_l2 = true;
644 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 643 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
645 break; 644 break;
646 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 645 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
647 case HWTSTAMP_FILTER_ALL: 646 case HWTSTAMP_FILTER_ALL:
@@ -652,7 +651,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
652 * Delay_Req messages and hardware does not support 651 * Delay_Req messages and hardware does not support
653 * timestamping all packets => return error 652 * timestamping all packets => return error
654 */ 653 */
655 config.rx_filter = HWTSTAMP_FILTER_NONE; 654 config->rx_filter = HWTSTAMP_FILTER_NONE;
656 return -ERANGE; 655 return -ERANGE;
657 } 656 }
658 657
@@ -671,7 +670,6 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
671 else 670 else
672 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 671 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
673 672
674
675 /* enable/disable TX */ 673 /* enable/disable TX */
676 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 674 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
677 regval &= ~IXGBE_TSYNCTXCTL_ENABLED; 675 regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -693,6 +691,29 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
693 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); 691 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
694 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); 692 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
695 693
694 return 0;
695}
696
697/**
698 * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
699 * @adapter: pointer to adapter struct
700 * @ifr: ioctl data
701 *
702 * Set hardware to requested mode. If unsupported, return an error with no
703 * changes. Otherwise, store the mode for future reference.
704 */
705int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
706{
707 struct hwtstamp_config config;
708 int err;
709
710 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
711 return -EFAULT;
712
713 err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
714 if (err)
715 return err;
716
696 /* save these settings for future reference */ 717 /* save these settings for future reference */
697 memcpy(&adapter->tstamp_config, &config, 718 memcpy(&adapter->tstamp_config, &config,
698 sizeof(adapter->tstamp_config)); 719 sizeof(adapter->tstamp_config));
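Splitting ixgbe_ptp_set_ts_config() in two is what lets ixgbe_ptp_reset() (below) replay the stored mode after a MAC reset without another trip through the ioctl path. The shape of the split, as a condensed standalone model (simplified config struct; program_hw() is a hypothetical stand-in for the register writes):

    #include <errno.h>

    struct ts_config {
        int flags;
        int tx_type;
        int rx_filter;
    };

    static struct ts_config saved_config;

    int program_hw(struct ts_config *cfg); /* may generalize cfg->rx_filter */

    /* internal half: validate and program; no user-space I/O here */
    static int set_timestamp_mode(struct ts_config *cfg)
    {
        if (cfg->flags)
            return -EINVAL; /* reserved for future extensions */
        return program_hw(cfg);
    }

    /* user entry point: program first, save only on success */
    int set_ts_config(const struct ts_config *req)
    {
        struct ts_config cfg = *req;
        int err = set_timestamp_mode(&cfg);

        if (err)
            return err;
        saved_config = cfg; /* replayed verbatim by the reset path */
        return 0;
    }

On reset the driver simply calls the internal half with &adapter->tstamp_config, so whatever mode the user last requested is restored.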
@@ -790,9 +811,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
790 * ixgbe_ptp_reset 811 * ixgbe_ptp_reset
791 * @adapter: the ixgbe private board structure 812 * @adapter: the ixgbe private board structure
792 * 813 *
793 * When the MAC resets, all timesync features are reset. This function should be 814 * When the MAC resets, all the hardware bits for timesync are reset. This
794 * called to re-enable the PTP clock structure. It will re-init the timecounter 815 * function is used to re-enable the device for PTP based on current settings.
795 * structure based on the kernel time as well as setup the cycle counter data. 816 * We do lose the current clock time, so just reset the cyclecounter to the
817 * system real clock time.
818 *
819 * This function will maintain hwtstamp_config settings, and resets the SDP
820 * output if it was enabled.
796 */ 821 */
797void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) 822void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
798{ 823{
@@ -804,8 +829,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
804 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); 829 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
805 IXGBE_WRITE_FLUSH(hw); 830 IXGBE_WRITE_FLUSH(hw);
806 831
807 /* Reset the saved tstamp_config */ 832 /* reset the hardware timestamping mode */
808 memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config)); 833 ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
809 834
810 ixgbe_ptp_start_cyclecounter(adapter); 835 ixgbe_ptp_start_cyclecounter(adapter);
811 836
@@ -825,16 +850,23 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
825} 850}
826 851
827/** 852/**
828 * ixgbe_ptp_init 853 * ixgbe_ptp_create_clock
829 * @adapter: the ixgbe private adapter structure 854 * @adapter: the ixgbe private adapter structure
830 * 855 *
831 * This function performs the required steps for enabling ptp 856 * This function performs setup of the user entry point function table and
832 * support. If ptp support has already been loaded it simply calls the 857 * initializes the PTP clock device, which is used to access the clock-like
833 * cyclecounter init routine and exits. 858 * features of the PTP core. It will be called by ixgbe_ptp_init, only if
859 * there isn't already a clock device (such as after a suspend/resume cycle,
860 * where the clock device wasn't destroyed).
834 */ 861 */
835void ixgbe_ptp_init(struct ixgbe_adapter *adapter) 862static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
836{ 863{
837 struct net_device *netdev = adapter->netdev; 864 struct net_device *netdev = adapter->netdev;
865 long err;
866
867 /* do nothing if we already have a clock device */
868 if (!IS_ERR_OR_NULL(adapter->ptp_clock))
869 return 0;
838 870
839 switch (adapter->hw.mac.type) { 871 switch (adapter->hw.mac.type) {
840 case ixgbe_mac_X540: 872 case ixgbe_mac_X540:
@@ -851,7 +883,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
851 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; 883 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
852 adapter->ptp_caps.gettime = ixgbe_ptp_gettime; 884 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
853 adapter->ptp_caps.settime = ixgbe_ptp_settime; 885 adapter->ptp_caps.settime = ixgbe_ptp_settime;
854 adapter->ptp_caps.enable = ixgbe_ptp_enable; 886 adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
855 break; 887 break;
856 case ixgbe_mac_82599EB: 888 case ixgbe_mac_82599EB:
857 snprintf(adapter->ptp_caps.name, 889 snprintf(adapter->ptp_caps.name,
@@ -867,24 +899,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
867 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; 899 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
868 adapter->ptp_caps.gettime = ixgbe_ptp_gettime; 900 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
869 adapter->ptp_caps.settime = ixgbe_ptp_settime; 901 adapter->ptp_caps.settime = ixgbe_ptp_settime;
870 adapter->ptp_caps.enable = ixgbe_ptp_enable; 902 adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
871 break; 903 break;
872 default: 904 default:
873 adapter->ptp_clock = NULL; 905 adapter->ptp_clock = NULL;
874 return; 906 return -EOPNOTSUPP;
875 } 907 }
876 908
877 spin_lock_init(&adapter->tmreg_lock);
878 INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
879
880 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 909 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
881 &adapter->pdev->dev); 910 &adapter->pdev->dev);
882 if (IS_ERR(adapter->ptp_clock)) { 911 if (IS_ERR(adapter->ptp_clock)) {
912 err = PTR_ERR(adapter->ptp_clock);
883 adapter->ptp_clock = NULL; 913 adapter->ptp_clock = NULL;
884 e_dev_err("ptp_clock_register failed\n"); 914 e_dev_err("ptp_clock_register failed\n");
915 return err;
885 } else 916 } else
886 e_dev_info("registered PHC device on %s\n", netdev->name); 917 e_dev_info("registered PHC device on %s\n", netdev->name);
887 918
919 /* set default timestamp mode to disabled here. We do this in
920 * create_clock instead of init, because we don't want to override the
921 * previous settings during a resume cycle.
922 */
923 adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
924 adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
925
926 return 0;
927}
928
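The guard at the top of ixgbe_ptp_create_clock() is the heart of the suspend/resume fix: if a clock device already exists it is reused, so the PHC index seen by user space and the stored timestamp mode both survive a suspend cycle. A minimal model of the idempotent-create pattern (register_clock() is a hypothetical stand-in for ptp_clock_register()):

    #include <stddef.h>
    #include <errno.h>

    struct ptp_clock;
    struct ptp_clock *register_clock(void); /* stand-in, NULL on failure */

    static struct ptp_clock *clock;

    static int create_clock(void)
    {
        /* do nothing if a clock already exists (e.g. the resume path) */
        if (clock)
            return 0;

        clock = register_clock();
        if (!clock)
            return -ENODEV;

        /* defaults are set only on first creation, so a resume
         * does not clobber the user's configured mode
         */
        return 0;
    }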
929/**
930 * ixgbe_ptp_init
931 * @adapter: the ixgbe private adapter structure
932 *
933 * This function performs the required steps for enabling PTP
934 * support. If PTP support has already been loaded it simply calls the
935 * cyclecounter init routine and exits.
936 */
937void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
938{
939 /* initialize the spin lock first since we can't control when a user
940 * will call the entry functions once we have initialized the clock
941 * device
942 */
943 spin_lock_init(&adapter->tmreg_lock);
944
945 /* obtain a PTP device, or re-use an existing device */
946 if (ixgbe_ptp_create_clock(adapter))
947 return;
948
949 /* we have a clock so we can initialize work now */
950 INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
951
952 /* reset the PTP related hardware bits */
888 ixgbe_ptp_reset(adapter); 953 ixgbe_ptp_reset(adapter);
889 954
890 /* enter the IXGBE_PTP_RUNNING state */ 955 /* enter the IXGBE_PTP_RUNNING state */
@@ -894,28 +959,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
894} 959}
895 960
896/** 961/**
897 * ixgbe_ptp_stop - disable ptp device and stop the overflow check 962 * ixgbe_ptp_suspend - stop PTP work items
898 * @adapter: pointer to adapter struct 963 * @adapter: pointer to adapter struct
899 * 964 *
900 * this function stops the ptp support, and cancels the delayed work. 965 * this function suspends PTP activity, and prevents more PTP work from being
966 * generated, but does not destroy the PTP clock device.
901 */ 967 */
902void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) 968void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
903{ 969{
904 /* Leave the IXGBE_PTP_RUNNING state. */ 970 /* Leave the IXGBE_PTP_RUNNING state. */
905 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 971 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
906 return; 972 return;
907 973
908 /* stop the PPS signal */ 974 /* since this might be called in suspend, we don't clear the state,
909 adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; 975 * but simply reset the auxiliary PPS signal control register
910 ixgbe_ptp_setup_sdp(adapter); 976 */
977 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
911 978
979 /* ensure that we cancel any pending PTP Tx work item in progress */
912 cancel_work_sync(&adapter->ptp_tx_work); 980 cancel_work_sync(&adapter->ptp_tx_work);
913 if (adapter->ptp_tx_skb) { 981 if (adapter->ptp_tx_skb) {
914 dev_kfree_skb_any(adapter->ptp_tx_skb); 982 dev_kfree_skb_any(adapter->ptp_tx_skb);
915 adapter->ptp_tx_skb = NULL; 983 adapter->ptp_tx_skb = NULL;
916 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); 984 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
917 } 985 }
986}
987
988/**
989 * ixgbe_ptp_stop - close the PTP device
990 * @adapter: pointer to adapter struct
991 *
992 * completely destroy the PTP device, should only be called when the device is
993 * being fully closed.
994 */
995void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
996{
997 /* first, suspend PTP activity */
998 ixgbe_ptp_suspend(adapter);
918 999
1000 /* disable the PTP clock device */
919 if (adapter->ptp_clock) { 1001 if (adapter->ptp_clock) {
920 ptp_clock_unregister(adapter->ptp_clock); 1002 ptp_clock_unregister(adapter->ptp_clock);
921 adapter->ptp_clock = NULL; 1003 adapter->ptp_clock = NULL;
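The suspend/stop split above mirrors ixgbe_close_suspend() on the netdev side: suspend quiesces activity (clears the RUNNING bit, silences the PPS output, cancels the Tx timestamp work) but keeps the clock registered, while stop is suspend plus unregistration. Condensed model (hypothetical names, continuing the create_clock sketch):

    struct ptp_clock;
    static int running;
    static struct ptp_clock *clock; /* as in the create_clock sketch */

    void cancel_tx_work(void);
    void unregister_clock(struct ptp_clock *c);

    void ptp_suspend(void)
    {
        if (!running)
            return; /* already quiesced: safe to call twice */
        running = 0;
        cancel_tx_work();
    }

    void ptp_stop(void)
    {
        ptp_suspend();  /* first quiesce activity... */
        if (clock) {    /* ...then drop the clock device itself */
            unregister_clock(clock);
            clock = NULL;
        }
    }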
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d396c99..16b3a1cd9db6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
72 for (i = 0; i < num_vf_macvlans; i++) { 72 for (i = 0; i < num_vf_macvlans; i++) {
73 mv_list->vf = -1; 73 mv_list->vf = -1;
74 mv_list->free = true; 74 mv_list->free = true;
75 mv_list->rar_entry = hw->mac.num_rar_entries -
76 (i + adapter->num_vfs + 1);
77 list_add(&mv_list->l, &adapter->vf_mvs.l); 75 list_add(&mv_list->l, &adapter->vf_mvs.l);
78 mv_list++; 76 mv_list++;
79 } 77 }
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
327 u32 vector_bit; 325 u32 vector_bit;
328 u32 vector_reg; 326 u32 vector_reg;
329 u32 mta_reg; 327 u32 mta_reg;
328 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
330 329
331 /* only so many hash values supported */ 330 /* only so many hash values supported */
332 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); 331 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
353 mta_reg |= (1 << vector_bit); 352 mta_reg |= (1 << vector_bit);
354 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 353 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
355 } 354 }
355 vmolr |= IXGBE_VMOLR_ROMPE;
356 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
356 357
357 return 0; 358 return 0;
358} 359}
359 360
360static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter) 361#ifdef CONFIG_PCI_IOV
361{
362 struct ixgbe_hw *hw = &adapter->hw;
363 struct list_head *pos;
364 struct vf_macvlans *entry;
365
366 list_for_each(pos, &adapter->vf_mvs.l) {
367 entry = list_entry(pos, struct vf_macvlans, l);
368 if (!entry->free)
369 hw->mac.ops.set_rar(hw, entry->rar_entry,
370 entry->vf_macvlan,
371 entry->vf, IXGBE_RAH_AV);
372 }
373}
374
375void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) 362void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
376{ 363{
377 struct ixgbe_hw *hw = &adapter->hw; 364 struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
382 u32 mta_reg; 369 u32 mta_reg;
383 370
384 for (i = 0; i < adapter->num_vfs; i++) { 371 for (i = 0; i < adapter->num_vfs; i++) {
372 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
385 vfinfo = &adapter->vfinfo[i]; 373 vfinfo = &adapter->vfinfo[i];
386 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { 374 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
387 hw->addr_ctrl.mta_in_use++; 375 hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
391 mta_reg |= (1 << vector_bit); 379 mta_reg |= (1 << vector_bit);
392 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 380 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
393 } 381 }
382
383 if (vfinfo->num_vf_mc_hashes)
384 vmolr |= IXGBE_VMOLR_ROMPE;
385 else
386 vmolr &= ~IXGBE_VMOLR_ROMPE;
387 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
394 } 388 }
395 389
396 /* Restore any VF macvlans */ 390 /* Restore any VF macvlans */
397 ixgbe_restore_vf_macvlans(adapter); 391 ixgbe_full_sync_mac_table(adapter);
398} 392}
393#endif
399 394
400static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, 395static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
401 u32 vf) 396 u32 vf)
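The VMOLR updates above follow one read-modify-write pattern: the ROMPE bit (accept multicast packets that match the MTA) is set for a VF only while that VF actually has multicast hashes programmed, and cleared again on restore when it has none. A condensed sketch of the pattern, with an illustrative helper name (the register and bit names are from this diff):

	static void example_vf_rompe(struct ixgbe_hw *hw, u32 vf, bool enable)
	{
		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

		if (enable)
			vmolr |= IXGBE_VMOLR_ROMPE;	/* accept MTA-matched multicast */
		else
			vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
	}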
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
495static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 490static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
496{ 491{
497 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 492 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
498 vmolr |= (IXGBE_VMOLR_ROMPE | 493 vmolr |= IXGBE_VMOLR_BAM;
499 IXGBE_VMOLR_BAM);
500 if (aupe) 494 if (aupe)
501 vmolr |= IXGBE_VMOLR_AUPE; 495 vmolr |= IXGBE_VMOLR_AUPE;
502 else 496 else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
514{ 508{
515 struct ixgbe_hw *hw = &adapter->hw; 509 struct ixgbe_hw *hw = &adapter->hw;
516 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 510 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
517 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
518 u8 num_tcs = netdev_get_num_tc(adapter->netdev); 511 u8 num_tcs = netdev_get_num_tc(adapter->netdev);
519 512
520 /* add PF assigned VLAN or VLAN 0 */ 513 /* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
544 /* Flush and reset the mta with the new values */ 537 /* Flush and reset the mta with the new values */
545 ixgbe_set_rx_mode(adapter->netdev); 538 ixgbe_set_rx_mode(adapter->netdev);
546 539
547 hw->mac.ops.clear_rar(hw, rar_entry); 540 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
548 541
549 /* reset VF api back to unknown */ 542 /* reset VF api back to unknown */
550 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; 543 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
553static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 546static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
554 int vf, unsigned char *mac_addr) 547 int vf, unsigned char *mac_addr)
555{ 548{
556 struct ixgbe_hw *hw = &adapter->hw; 549 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
557 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
558
559 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 550 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
560 hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); 551 ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
561 552
562 return 0; 553 return 0;
563} 554}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
565static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, 556static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
566 int vf, int index, unsigned char *mac_addr) 557 int vf, int index, unsigned char *mac_addr)
567{ 558{
568 struct ixgbe_hw *hw = &adapter->hw;
569 struct list_head *pos; 559 struct list_head *pos;
570 struct vf_macvlans *entry; 560 struct vf_macvlans *entry;
571 561
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
576 entry->vf = -1; 566 entry->vf = -1;
577 entry->free = true; 567 entry->free = true;
578 entry->is_macvlan = false; 568 entry->is_macvlan = false;
579 hw->mac.ops.clear_rar(hw, entry->rar_entry); 569 ixgbe_del_mac_filter(adapter,
570 entry->vf_macvlan, vf);
580 } 571 }
581 } 572 }
582 } 573 }
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
612 entry->vf = vf; 603 entry->vf = vf;
613 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); 604 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
614 605
615 hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV); 606 ixgbe_add_mac_filter(adapter, mac_addr, vf);
616 607
617 return 0; 608 return 0;
618} 609}
@@ -1138,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1138 adapter->vfinfo[vf].vlan_count--; 1129 adapter->vfinfo[vf].vlan_count--;
1139 adapter->vfinfo[vf].pf_vlan = 0; 1130 adapter->vfinfo[vf].pf_vlan = 0;
1140 adapter->vfinfo[vf].pf_qos = 0; 1131 adapter->vfinfo[vf].pf_qos = 0;
1141 } 1132 }
1142out: 1133out:
1143 return err; 1134 return err;
1144} 1135}
1145 1136
1146static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) 1137static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1231,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1231 } 1222 }
1232} 1223}
1233 1224
1234int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 1225int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1226 int max_tx_rate)
1235{ 1227{
1236 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1228 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1237 int link_speed; 1229 int link_speed;
@@ -1249,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
1249 if (link_speed != 10000) 1241 if (link_speed != 10000)
1250 return -EINVAL; 1242 return -EINVAL;
1251 1243
1244 if (min_tx_rate)
1245 return -EINVAL;
1246
1252 /* rate limit cannot be less than 10Mbs or greater than link speed */ 1247 /* rate limit cannot be less than 10Mbs or greater than link speed */
1253 if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed))) 1248 if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
1254 return -EINVAL; 1249 return -EINVAL;
1255 1250
1256 /* store values */ 1251 /* store values */
1257 adapter->vf_rate_link_speed = link_speed; 1252 adapter->vf_rate_link_speed = link_speed;
1258 adapter->vfinfo[vf].tx_rate = tx_rate; 1253 adapter->vfinfo[vf].tx_rate = max_tx_rate;
1259 1254
1260 /* update hardware configuration */ 1255 /* update hardware configuration */
1261 ixgbe_set_vf_rate_limit(adapter, vf); 1256 ixgbe_set_vf_rate_limit(adapter, vf);
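The signature change above tracks the new ndo_set_vf_bw prototype, which splits the old single tx_rate argument into a (min_tx_rate, max_tx_rate) pair. ixgbe hardware can only cap a VF, so any nonzero minimum is rejected rather than silently ignored; a sketch of the shape a driver without minimum-rate support takes (the helper name is illustrative):

	static int example_ndo_set_vf_bw(struct net_device *dev, int vf,
					 int min_tx_rate, int max_tx_rate)
	{
		/* no hardware notion of a guaranteed minimum: refuse it */
		if (min_tx_rate)
			return -EINVAL;

		return example_set_vf_rate_cap(dev, vf, max_tx_rate);
	}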
@@ -1297,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1297 return -EINVAL; 1292 return -EINVAL;
1298 ivi->vf = vf; 1293 ivi->vf = vf;
1299 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); 1294 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1300 ivi->tx_rate = adapter->vfinfo[vf].tx_rate; 1295 ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1296 ivi->min_tx_rate = 0;
1301 ivi->vlan = adapter->vfinfo[vf].pf_vlan; 1297 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1302 ivi->qos = adapter->vfinfo[vf].pf_qos; 1298 ivi->qos = adapter->vfinfo[vf].pf_qos;
1303 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; 1299 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eaddfb2ed..32c26d586c01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
34 */ 34 */
35#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) 35#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
36 36
37#ifdef CONFIG_PCI_IOV
37void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 38void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
39#endif
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 40void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); 41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
40void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -42,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
42int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); 44int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
43int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, 45int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
44 u8 qos); 46 u8 qos);
45int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 47int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
48 int max_tx_rate);
46int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); 49int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
47int ixgbe_ndo_get_vf_config(struct net_device *netdev, 50int ixgbe_ndo_get_vf_config(struct net_device *netdev,
48 int vf, struct ifla_vf_info *ivi); 51 int vf, struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff2423f07..9a89f98b35f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -160,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
160#define IXGBE_MAX_EITR 0x00000FF8 160#define IXGBE_MAX_EITR 0x00000FF8
161#define IXGBE_MIN_EITR 8 161#define IXGBE_MIN_EITR 8
162#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ 162#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
163 (0x012300 + (((_i) - 24) * 4))) 163 (0x012300 + (((_i) - 24) * 4)))
164#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 164#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
165#define IXGBE_EITR_LLI_MOD 0x00008000 165#define IXGBE_EITR_LLI_MOD 0x00008000
166#define IXGBE_EITR_CNT_WDIS 0x80000000 166#define IXGBE_EITR_CNT_WDIS 0x80000000
@@ -213,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
213 * 64-127: 0x0D014 + (n-64)*0x40 213 * 64-127: 0x0D014 + (n-64)*0x40
214 */ 214 */
215#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ 215#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
216 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 216 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
217 (0x0D014 + (((_i) - 64) * 0x40)))) 217 (0x0D014 + (((_i) - 64) * 0x40))))
218/* 218/*
219 * Rx DCA Control Register: 219 * Rx DCA Control Register:
@@ -222,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
222 * 64-127: 0x0D00C + (n-64)*0x40 222 * 64-127: 0x0D00C + (n-64)*0x40
223 */ 223 */
224#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ 224#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
225 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ 225 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
226 (0x0D00C + (((_i) - 64) * 0x40)))) 226 (0x0D00C + (((_i) - 64) * 0x40))))
227#define IXGBE_RDRXCTL 0x02F00 227#define IXGBE_RDRXCTL 0x02F00
228#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 228#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
229 /* 8 of these 0x03C00 - 0x03C1C */ 229 /* 8 of these 0x03C00 - 0x03C1C */
230#define IXGBE_RXCTRL 0x03000 230#define IXGBE_RXCTRL 0x03000
231#define IXGBE_DROPEN 0x03D04 231#define IXGBE_DROPEN 0x03D04
232#define IXGBE_RXPBSIZE_SHIFT 10 232#define IXGBE_RXPBSIZE_SHIFT 10
@@ -239,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
239/* Multicast Table Array - 128 entries */ 239/* Multicast Table Array - 128 entries */
240#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) 240#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
241#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 241#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
242 (0x0A200 + ((_i) * 8))) 242 (0x0A200 + ((_i) * 8)))
243#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 243#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
244 (0x0A204 + ((_i) * 8))) 244 (0x0A204 + ((_i) * 8)))
245#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) 245#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
246#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) 246#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
247/* Packet split receive type */ 247/* Packet split receive type */
248#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ 248#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
249 (0x0EA00 + ((_i) * 4))) 249 (0x0EA00 + ((_i) * 4)))
250/* array of 4096 1-bit vlan filters */ 250/* array of 4096 1-bit vlan filters */
251#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) 251#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
252/*array of 4096 4-bit vlan vmdq indices */ 252/*array of 4096 4-bit vlan vmdq indices */
@@ -696,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
696 696
697#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) 697#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
698#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \ 698#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
699 (0x08600 + ((_i) * 4))) 699 (0x08600 + ((_i) * 4)))
700#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) 700#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
701 701
702#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ 702#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -820,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
820#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 820#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
821#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 821#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
822#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ 822#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
823 IXGBE_GCR_EXT_VT_MODE_64) 823 IXGBE_GCR_EXT_VT_MODE_64)
824 824
825/* Time Sync Registers */ 825/* Time Sync Registers */
826#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 826#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1396,10 +1396,10 @@ enum {
1396#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 1396#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
1397 1397
1398#define IXGBE_EIMS_ENABLE_MASK ( \ 1398#define IXGBE_EIMS_ENABLE_MASK ( \
1399 IXGBE_EIMS_RTX_QUEUE | \ 1399 IXGBE_EIMS_RTX_QUEUE | \
1400 IXGBE_EIMS_LSC | \ 1400 IXGBE_EIMS_LSC | \
1401 IXGBE_EIMS_TCP_TIMER | \ 1401 IXGBE_EIMS_TCP_TIMER | \
1402 IXGBE_EIMS_OTHER) 1402 IXGBE_EIMS_OTHER)
1403 1403
1404/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 1404/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
1405#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ 1405#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
@@ -2161,18 +2161,18 @@ enum {
2161 2161
2162/* Masks to determine if packets should be dropped due to frame errors */ 2162/* Masks to determine if packets should be dropped due to frame errors */
2163#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ 2163#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
2164 IXGBE_RXD_ERR_CE | \ 2164 IXGBE_RXD_ERR_CE | \
2165 IXGBE_RXD_ERR_LE | \ 2165 IXGBE_RXD_ERR_LE | \
2166 IXGBE_RXD_ERR_PE | \ 2166 IXGBE_RXD_ERR_PE | \
2167 IXGBE_RXD_ERR_OSE | \ 2167 IXGBE_RXD_ERR_OSE | \
2168 IXGBE_RXD_ERR_USE) 2168 IXGBE_RXD_ERR_USE)
2169 2169
2170#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ 2170#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
2171 IXGBE_RXDADV_ERR_CE | \ 2171 IXGBE_RXDADV_ERR_CE | \
2172 IXGBE_RXDADV_ERR_LE | \ 2172 IXGBE_RXDADV_ERR_LE | \
2173 IXGBE_RXDADV_ERR_PE | \ 2173 IXGBE_RXDADV_ERR_PE | \
2174 IXGBE_RXDADV_ERR_OSE | \ 2174 IXGBE_RXDADV_ERR_OSE | \
2175 IXGBE_RXDADV_ERR_USE) 2175 IXGBE_RXDADV_ERR_USE)
2176 2176
2177/* Multicast bit mask */ 2177/* Multicast bit mask */
2178#define IXGBE_MCSTCTRL_MFE 0x4 2178#define IXGBE_MCSTCTRL_MFE 0x4
@@ -2393,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
2393#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ 2393#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
2394#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 2394#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
2395#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 2395#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
2396 IXGBE_ADVTXD_POPTS_SHIFT) 2396 IXGBE_ADVTXD_POPTS_SHIFT)
2397#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 2397#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
2398 IXGBE_ADVTXD_POPTS_SHIFT) 2398 IXGBE_ADVTXD_POPTS_SHIFT)
2399#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 2399#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
2400#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 2400#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
2401#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 2401#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2435,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
2435#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 2435#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
2436#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 2436#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
2437#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ 2437#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
2438 IXGBE_LINK_SPEED_10GB_FULL) 2438 IXGBE_LINK_SPEED_10GB_FULL)
2439#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ 2439#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
2440 IXGBE_LINK_SPEED_1GB_FULL | \ 2440 IXGBE_LINK_SPEED_1GB_FULL | \
2441 IXGBE_LINK_SPEED_10GB_FULL) 2441 IXGBE_LINK_SPEED_10GB_FULL)
2442 2442
2443 2443
2444/* Physical layer type */ 2444/* Physical layer type */
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
2746/* Flow control parameters */ 2746/* Flow control parameters */
2747struct ixgbe_fc_info { 2747struct ixgbe_fc_info {
2748 u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ 2748 u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
2749 u32 low_water; /* Flow Control Low-water */ 2749 u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
2750 u16 pause_time; /* Flow Control Pause timer */ 2750 u16 pause_time; /* Flow Control Pause timer */
2751 bool send_xon; /* Flow control send XON */ 2751 bool send_xon; /* Flow control send XON */
2752 bool strict_ieee; /* Strict IEEE mode */ 2752 bool strict_ieee; /* Strict IEEE mode */
@@ -2840,7 +2840,7 @@ struct ixgbe_hw;
2840 2840
2841/* iterator type for walking multicast address lists */ 2841/* iterator type for walking multicast address lists */
2842typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, 2842typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
2843 u32 *vmdq); 2843 u32 *vmdq);
2844 2844
2845/* Function pointer table */ 2845/* Function pointer table */
2846struct ixgbe_eeprom_operations { 2846struct ixgbe_eeprom_operations {
@@ -2887,7 +2887,7 @@ struct ixgbe_mac_operations {
2887 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2887 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2888 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2888 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
2889 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2889 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
2890 bool *); 2890 bool *);
2891 2891
2892 /* Packet Buffer Manipulation */ 2892 /* Packet Buffer Manipulation */
2893 void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); 2893 void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 188a5974b85c..40dd798e1290 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -81,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
81 bool autoneg_wait_to_complete) 81 bool autoneg_wait_to_complete)
82{ 82{
83 return hw->phy.ops.setup_link_speed(hw, speed, 83 return hw->phy.ops.setup_link_speed(hw, speed,
84 autoneg_wait_to_complete); 84 autoneg_wait_to_complete);
85} 85}
86 86
87/** 87/**
@@ -155,7 +155,7 @@ mac_reset_top:
155 /* Add the SAN MAC address to the RAR only if it's a valid address */ 155 /* Add the SAN MAC address to the RAR only if it's a valid address */
156 if (is_valid_ether_addr(hw->mac.san_addr)) { 156 if (is_valid_ether_addr(hw->mac.san_addr)) {
157 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 157 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
158 hw->mac.san_addr, 0, IXGBE_RAH_AV); 158 hw->mac.san_addr, 0, IXGBE_RAH_AV);
159 159
160 /* Save the SAN MAC RAR index */ 160 /* Save the SAN MAC RAR index */
161 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; 161 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -166,7 +166,7 @@ mac_reset_top:
166 166
167 /* Store the alternative WWNN/WWPN prefix */ 167 /* Store the alternative WWNN/WWPN prefix */
168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
169 &hw->mac.wwpn_prefix); 169 &hw->mac.wwpn_prefix);
170 170
171reset_hw_out: 171reset_hw_out:
172 return status; 172 return status;
@@ -237,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
237 237
238 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 238 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
239 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 239 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
240 IXGBE_EEC_SIZE_SHIFT); 240 IXGBE_EEC_SIZE_SHIFT);
241 eeprom->word_size = 1 << (eeprom_size + 241 eeprom->word_size = 1 << (eeprom_size +
242 IXGBE_EEPROM_WORD_SIZE_SHIFT); 242 IXGBE_EEPROM_WORD_SIZE_SHIFT);
243 243
244 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", 244 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
245 eeprom->type, eeprom->word_size); 245 eeprom->type, eeprom->word_size);
@@ -712,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
712 udelay(50); 712 udelay(50);
713 } 713 }
714 } else { 714 } else {
715 hw_dbg(hw, "Software semaphore SMBI between device drivers " 715 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
716 "not granted.\n");
717 } 716 }
718 717
719 return status; 718 return status;
@@ -813,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
813 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 812 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
814 .get_media_type = &ixgbe_get_media_type_X540, 813 .get_media_type = &ixgbe_get_media_type_X540,
815 .get_supported_physical_layer = 814 .get_supported_physical_layer =
816 &ixgbe_get_supported_physical_layer_X540, 815 &ixgbe_get_supported_physical_layer_X540,
817 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, 816 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
818 .get_mac_addr = &ixgbe_get_mac_addr_generic, 817 .get_mac_addr = &ixgbe_get_mac_addr_generic,
819 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 818 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1baecb60f065..d420f124633f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -135,8 +135,8 @@ static int ixgbevf_get_settings(struct net_device *netdev,
135 ethtool_cmd_speed_set(ecmd, speed); 135 ethtool_cmd_speed_set(ecmd, speed);
136 ecmd->duplex = DUPLEX_FULL; 136 ecmd->duplex = DUPLEX_FULL;
137 } else { 137 } else {
138 ethtool_cmd_speed_set(ecmd, -1); 138 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
139 ecmd->duplex = -1; 139 ecmd->duplex = DUPLEX_UNKNOWN;
140 } 140 }
141 141
142 return 0; 142 return 0;
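Replacing the bare -1 values above with the ethtool constants is a readability fix with no ABI change: SPEED_UNKNOWN is defined as -1, and DUPLEX_UNKNOWN is 0xff, which is exactly what -1 truncates to in the u8 duplex field. The reporting pattern, for reference (constants from include/uapi/linux/ethtool.h):

	if (link_up) {
		ethtool_cmd_speed_set(ecmd, speed);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);	/* -1 */
		ecmd->duplex = DUPLEX_UNKNOWN;			/* 0xff */
	}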
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
813 813
814void ixgbevf_set_ethtool_ops(struct net_device *netdev) 814void ixgbevf_set_ethtool_ops(struct net_device *netdev)
815{ 815{
816 SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops); 816 netdev->ethtool_ops = &ixgbevf_ethtool_ops;
817} 817}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de2793b06305..75467f83772c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86 86
87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 88MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
89MODULE_LICENSE("GPL"); 89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION); 90MODULE_VERSION(DRV_VERSION);
91 91
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b7b8d74c22d9..b151a949f352 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -42,6 +42,7 @@
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43#include <linux/in.h> 43#include <linux/in.h>
44#include <linux/ip.h> 44#include <linux/ip.h>
45#include <net/tso.h>
45#include <linux/tcp.h> 46#include <linux/tcp.h>
46#include <linux/udp.h> 47#include <linux/udp.h>
47#include <linux/etherdevice.h> 48#include <linux/etherdevice.h>
@@ -179,10 +180,18 @@ static char mv643xx_eth_driver_version[] = "1.4";
179 * Misc definitions. 180 * Misc definitions.
180 */ 181 */
181#define DEFAULT_RX_QUEUE_SIZE 128 182#define DEFAULT_RX_QUEUE_SIZE 128
182#define DEFAULT_TX_QUEUE_SIZE 256 183#define DEFAULT_TX_QUEUE_SIZE 512
183#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) 184#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
184 185
186#define TSO_HEADER_SIZE 128
185 187
188/* Max number of allowed TCP segments for software TSO */
189#define MV643XX_MAX_TSO_SEGS 100
190#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
191
192#define IS_TSO_HEADER(txq, addr) \
193 ((addr >= txq->tso_hdrs_dma) && \
194 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
186/* 195/*
187 * RX/TX descriptors. 196 * RX/TX descriptors.
188 */ 197 */
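IS_TSO_HEADER() above gives the reclaim path a cheap way to tell whether a completed descriptor's buffer pointer falls inside the per-queue coherent block that holds prebuilt TSO headers; such buffers must never be handed to dma_unmap_single(), since they were never individually mapped. The check is used later in this patch in txq_reclaim():

	if (!IS_TSO_HEADER(txq, desc->buf_ptr))
		dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
				 desc->byte_cnt, DMA_TO_DEVICE);
	/* TSO headers live in one dma_alloc_coherent() region and are
	 * released in txq_deinit(), not per packet */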
@@ -250,6 +259,7 @@ struct tx_desc {
250#define GEN_TCP_UDP_CHECKSUM 0x00020000 259#define GEN_TCP_UDP_CHECKSUM 0x00020000
251#define UDP_FRAME 0x00010000 260#define UDP_FRAME 0x00010000
252#define MAC_HDR_EXTRA_4_BYTES 0x00008000 261#define MAC_HDR_EXTRA_4_BYTES 0x00008000
262#define GEN_TCP_UDP_CHK_FULL 0x00000400
253#define MAC_HDR_EXTRA_8_BYTES 0x00000200 263#define MAC_HDR_EXTRA_8_BYTES 0x00000200
254 264
255#define TX_IHL_SHIFT 11 265#define TX_IHL_SHIFT 11
@@ -345,6 +355,12 @@ struct tx_queue {
345 int tx_curr_desc; 355 int tx_curr_desc;
346 int tx_used_desc; 356 int tx_used_desc;
347 357
358 int tx_stop_threshold;
359 int tx_wake_threshold;
360
361 char *tso_hdrs;
362 dma_addr_t tso_hdrs_dma;
363
348 struct tx_desc *tx_desc_area; 364 struct tx_desc *tx_desc_area;
349 dma_addr_t tx_desc_dma; 365 dma_addr_t tx_desc_dma;
350 int tx_desc_area_size; 366 int tx_desc_area_size;
@@ -491,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
491 507
492 if (netif_tx_queue_stopped(nq)) { 508 if (netif_tx_queue_stopped(nq)) {
493 __netif_tx_lock(nq, smp_processor_id()); 509 __netif_tx_lock(nq, smp_processor_id());
494 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) 510 if (txq->tx_desc_count <= txq->tx_wake_threshold)
495 netif_tx_wake_queue(nq); 511 netif_tx_wake_queue(nq);
496 __netif_tx_unlock(nq); 512 __netif_tx_unlock(nq);
497 } 513 }
@@ -661,6 +677,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
661 return 0; 677 return 0;
662} 678}
663 679
680static inline __be16 sum16_as_be(__sum16 sum)
681{
682 return (__force __be16)sum;
683}
684
685static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
686 u16 *l4i_chk, u32 *command, int length)
687{
688 int ret;
689 u32 cmd = 0;
690
691 if (skb->ip_summed == CHECKSUM_PARTIAL) {
692 int hdr_len;
693 int tag_bytes;
694
695 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
696 skb->protocol != htons(ETH_P_8021Q));
697
698 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
699 tag_bytes = hdr_len - ETH_HLEN;
700
701 if (length - hdr_len > mp->shared->tx_csum_limit ||
702 unlikely(tag_bytes & ~12)) {
703 ret = skb_checksum_help(skb);
704 if (!ret)
705 goto no_csum;
706 return ret;
707 }
708
709 if (tag_bytes & 4)
710 cmd |= MAC_HDR_EXTRA_4_BYTES;
711 if (tag_bytes & 8)
712 cmd |= MAC_HDR_EXTRA_8_BYTES;
713
714 cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
715 GEN_IP_V4_CHECKSUM |
716 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
717
718 /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
719 * it seems we don't need to pass the initial checksum. */
720 switch (ip_hdr(skb)->protocol) {
721 case IPPROTO_UDP:
722 cmd |= UDP_FRAME;
723 *l4i_chk = 0;
724 break;
725 case IPPROTO_TCP:
726 *l4i_chk = 0;
727 break;
728 default:
729 WARN(1, "protocol not supported");
730 }
731 } else {
732no_csum:
733 /* Errata BTS #50, IHL must be 5 if no HW checksum */
734 cmd |= 5 << TX_IHL_SHIFT;
735 }
736 *command = cmd;
737 return 0;
738}
739
740static inline int
741txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
742 struct sk_buff *skb, char *data, int length,
743 bool last_tcp, bool is_last)
744{
745 int tx_index;
746 u32 cmd_sts;
747 struct tx_desc *desc;
748
749 tx_index = txq->tx_curr_desc++;
750 if (txq->tx_curr_desc == txq->tx_ring_size)
751 txq->tx_curr_desc = 0;
752 desc = &txq->tx_desc_area[tx_index];
753
754 desc->l4i_chk = 0;
755 desc->byte_cnt = length;
756 desc->buf_ptr = dma_map_single(dev->dev.parent, data,
757 length, DMA_TO_DEVICE);
758 if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
759 WARN(1, "dma_map_single failed!\n");
760 return -ENOMEM;
761 }
762
763 cmd_sts = BUFFER_OWNED_BY_DMA;
764 if (last_tcp) {
765 /* last descriptor in the TCP packet */
766 cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
767 /* last descriptor in SKB */
768 if (is_last)
769 cmd_sts |= TX_ENABLE_INTERRUPT;
770 }
771 desc->cmd_sts = cmd_sts;
772 return 0;
773}
774
775static inline void
776txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
777{
778 struct mv643xx_eth_private *mp = txq_to_mp(txq);
779 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
780 int tx_index;
781 struct tx_desc *desc;
782 int ret;
783 u32 cmd_csum = 0;
784 u16 l4i_chk = 0;
785
786 tx_index = txq->tx_curr_desc;
787 desc = &txq->tx_desc_area[tx_index];
788
789 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
790 if (ret)
791 WARN(1, "failed to prepare checksum!");
792
793 /* Should we set this? Can't use the value from skb_tx_csum()
794 * as it's not the correct initial L4 checksum to use. */
795 desc->l4i_chk = 0;
796
797 desc->byte_cnt = hdr_len;
798 desc->buf_ptr = txq->tso_hdrs_dma +
799 txq->tx_curr_desc * TSO_HEADER_SIZE;
800 desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
801 GEN_CRC;
802
803 txq->tx_curr_desc++;
804 if (txq->tx_curr_desc == txq->tx_ring_size)
805 txq->tx_curr_desc = 0;
806}
807
808static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
809 struct net_device *dev)
810{
811 struct mv643xx_eth_private *mp = txq_to_mp(txq);
812 int total_len, data_left, ret;
813 int desc_count = 0;
814 struct tso_t tso;
815 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
816
817 /* Count needed descriptors */
818 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
819 netdev_dbg(dev, "not enough descriptors for TSO!\n");
820 return -EBUSY;
821 }
822
823 /* Initialize the TSO handler, and prepare the first payload */
824 tso_start(skb, &tso);
825
826 total_len = skb->len - hdr_len;
827 while (total_len > 0) {
828 char *hdr;
829
830 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
831 total_len -= data_left;
832 desc_count++;
833
834 /* prepare packet headers: MAC + IP + TCP */
835 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
836 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
837 txq_put_hdr_tso(skb, txq, data_left);
838
839 while (data_left > 0) {
840 int size;
841 desc_count++;
842
843 size = min_t(int, tso.size, data_left);
844 ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
845 size == data_left,
846 total_len == 0);
847 if (ret)
848 goto err_release;
849 data_left -= size;
850 tso_build_data(skb, &tso, size);
851 }
852 }
853
854 __skb_queue_tail(&txq->tx_skb, skb);
855 skb_tx_timestamp(skb);
856
857 /* clear TX_END status */
858 mp->work_tx_end &= ~(1 << txq->index);
859
860 /* ensure all descriptors are written before poking hardware */
861 wmb();
862 txq_enable(txq);
863 txq->tx_desc_count += desc_count;
864 return 0;
865err_release:
866 /* TODO: Release all used data descriptors; header descriptors must not
867 * be DMA-unmapped.
868 */
869 return ret;
870}
871
664static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 872static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
665{ 873{
666 struct mv643xx_eth_private *mp = txq_to_mp(txq); 874 struct mv643xx_eth_private *mp = txq_to_mp(txq);
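txq_submit_tso() above is a direct consumer of the new net/tso.h software-TSO helpers added earlier in this series. Independent of mv643xx specifics, the flow those helpers impose looks like this (a minimal sketch; hdr_buf stands for whatever per-segment header storage the driver provides):

	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;

	tso_start(skb, &tso);
	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);

		total_len -= data_left;
		/* build MAC + IP + TCP headers for this segment */
		tso_build_hdr(skb, hdr_buf, &tso, data_left, total_len == 0);
		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			/* queue tso.data / size as one payload descriptor */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}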
@@ -671,8 +879,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
671 skb_frag_t *this_frag; 879 skb_frag_t *this_frag;
672 int tx_index; 880 int tx_index;
673 struct tx_desc *desc; 881 struct tx_desc *desc;
882 void *addr;
674 883
675 this_frag = &skb_shinfo(skb)->frags[frag]; 884 this_frag = &skb_shinfo(skb)->frags[frag];
885 addr = page_address(this_frag->page.p) + this_frag->page_offset;
676 tx_index = txq->tx_curr_desc++; 886 tx_index = txq->tx_curr_desc++;
677 if (txq->tx_curr_desc == txq->tx_ring_size) 887 if (txq->tx_curr_desc == txq->tx_ring_size)
678 txq->tx_curr_desc = 0; 888 txq->tx_curr_desc = 0;
@@ -692,19 +902,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
692 902
693 desc->l4i_chk = 0; 903 desc->l4i_chk = 0;
694 desc->byte_cnt = skb_frag_size(this_frag); 904 desc->byte_cnt = skb_frag_size(this_frag);
695 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, 905 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
696 this_frag, 0, 906 desc->byte_cnt, DMA_TO_DEVICE);
697 skb_frag_size(this_frag),
698 DMA_TO_DEVICE);
699 } 907 }
700} 908}
701 909
702static inline __be16 sum16_as_be(__sum16 sum) 910static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
703{ 911 struct net_device *dev)
704 return (__force __be16)sum;
705}
706
707static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
708{ 912{
709 struct mv643xx_eth_private *mp = txq_to_mp(txq); 913 struct mv643xx_eth_private *mp = txq_to_mp(txq);
710 int nr_frags = skb_shinfo(skb)->nr_frags; 914 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -712,54 +916,22 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
712 struct tx_desc *desc; 916 struct tx_desc *desc;
713 u32 cmd_sts; 917 u32 cmd_sts;
714 u16 l4i_chk; 918 u16 l4i_chk;
715 int length; 919 int length, ret;
716 920
717 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 921 cmd_sts = 0;
718 l4i_chk = 0; 922 l4i_chk = 0;
719 923
720 if (skb->ip_summed == CHECKSUM_PARTIAL) { 924 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
721 int hdr_len; 925 if (net_ratelimit())
722 int tag_bytes; 926 netdev_err(dev, "tx queue full?!\n");
723 927 return -EBUSY;
724 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
725 skb->protocol != htons(ETH_P_8021Q));
726
727 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
728 tag_bytes = hdr_len - ETH_HLEN;
729 if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
730 unlikely(tag_bytes & ~12)) {
731 if (skb_checksum_help(skb) == 0)
732 goto no_csum;
733 dev_kfree_skb_any(skb);
734 return 1;
735 }
736
737 if (tag_bytes & 4)
738 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
739 if (tag_bytes & 8)
740 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
741
742 cmd_sts |= GEN_TCP_UDP_CHECKSUM |
743 GEN_IP_V4_CHECKSUM |
744 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
745
746 switch (ip_hdr(skb)->protocol) {
747 case IPPROTO_UDP:
748 cmd_sts |= UDP_FRAME;
749 l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
750 break;
751 case IPPROTO_TCP:
752 l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
753 break;
754 default:
755 BUG();
756 }
757 } else {
758no_csum:
759 /* Errata BTS #50, IHL must be 5 if no HW checksum */
760 cmd_sts |= 5 << TX_IHL_SHIFT;
761 } 928 }
762 929
930 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
931 if (ret)
932 return ret;
933 cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
934
763 tx_index = txq->tx_curr_desc++; 935 tx_index = txq->tx_curr_desc++;
764 if (txq->tx_curr_desc == txq->tx_ring_size) 936 if (txq->tx_curr_desc == txq->tx_ring_size)
765 txq->tx_curr_desc = 0; 937 txq->tx_curr_desc = 0;
@@ -801,7 +973,7 @@ no_csum:
801static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 973static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
802{ 974{
803 struct mv643xx_eth_private *mp = netdev_priv(dev); 975 struct mv643xx_eth_private *mp = netdev_priv(dev);
804 int length, queue; 976 int length, queue, ret;
805 struct tx_queue *txq; 977 struct tx_queue *txq;
806 struct netdev_queue *nq; 978 struct netdev_queue *nq;
807 979
@@ -810,30 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
810 nq = netdev_get_tx_queue(dev, queue); 982 nq = netdev_get_tx_queue(dev, queue);
811 983
812 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 984 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
813 txq->tx_dropped++;
814 netdev_printk(KERN_DEBUG, dev, 985 netdev_printk(KERN_DEBUG, dev,
815 "failed to linearize skb with tiny unaligned fragment\n"); 986 "failed to linearize skb with tiny unaligned fragment\n");
816 return NETDEV_TX_BUSY; 987 return NETDEV_TX_BUSY;
817 } 988 }
818 989
819 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
820 if (net_ratelimit())
821 netdev_err(dev, "tx queue full?!\n");
822 dev_kfree_skb_any(skb);
823 return NETDEV_TX_OK;
824 }
825
826 length = skb->len; 990 length = skb->len;
827 991
828 if (!txq_submit_skb(txq, skb)) { 992 if (skb_is_gso(skb))
829 int entries_left; 993 ret = txq_submit_tso(txq, skb, dev);
830 994 else
995 ret = txq_submit_skb(txq, skb, dev);
996 if (!ret) {
831 txq->tx_bytes += length; 997 txq->tx_bytes += length;
832 txq->tx_packets++; 998 txq->tx_packets++;
833 999
834 entries_left = txq->tx_ring_size - txq->tx_desc_count; 1000 if (txq->tx_desc_count >= txq->tx_stop_threshold)
835 if (entries_left < MAX_SKB_FRAGS + 1)
836 netif_tx_stop_queue(nq); 1001 netif_tx_stop_queue(nq);
1002 } else {
1003 txq->tx_dropped++;
1004 dev_kfree_skb_any(skb);
837 } 1005 }
838 1006
839 return NETDEV_TX_OK; 1007 return NETDEV_TX_OK;
@@ -907,14 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
907 mp->dev->stats.tx_errors++; 1075 mp->dev->stats.tx_errors++;
908 } 1076 }
909 1077
910 if (cmd_sts & TX_FIRST_DESC) { 1078 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
911 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 1079 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
912 desc->byte_cnt, DMA_TO_DEVICE); 1080 desc->byte_cnt, DMA_TO_DEVICE);
913 } else {
914 dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
915 desc->byte_cnt, DMA_TO_DEVICE);
916 }
917
918 dev_kfree_skb(skb); 1081 dev_kfree_skb(skb);
919 } 1082 }
920 1083
@@ -1010,8 +1173,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1010 1173
1011 1174
1012/* mii management interface *************************************************/ 1175/* mii management interface *************************************************/
1013static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) 1176static void mv643xx_eth_adjust_link(struct net_device *dev)
1014{ 1177{
1178 struct mv643xx_eth_private *mp = netdev_priv(dev);
1015 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 1179 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1016 u32 autoneg_disable = FORCE_LINK_PASS | 1180 u32 autoneg_disable = FORCE_LINK_PASS |
1017 DISABLE_AUTO_NEG_SPEED_GMII | 1181 DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1387,7 +1551,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1387 1551
1388 ret = phy_ethtool_sset(mp->phy, cmd); 1552 ret = phy_ethtool_sset(mp->phy, cmd);
1389 if (!ret) 1553 if (!ret)
1390 mv643xx_adjust_pscr(mp); 1554 mv643xx_eth_adjust_link(dev);
1391 return ret; 1555 return ret;
1392} 1556}
1393 1557
@@ -1456,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1456 return -EINVAL; 1620 return -EINVAL;
1457 1621
1458 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1622 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1459 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; 1623 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1624 MV643XX_MAX_SKB_DESCS * 2, 4096);
1625 if (mp->tx_ring_size != er->tx_pending)
1626 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
1627 mp->tx_ring_size, er->tx_pending);
1460 1628
1461 if (netif_running(dev)) { 1629 if (netif_running(dev)) {
1462 mv643xx_eth_stop(dev); 1630 mv643xx_eth_stop(dev);
@@ -1832,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1832 2000
1833 txq->tx_ring_size = mp->tx_ring_size; 2001 txq->tx_ring_size = mp->tx_ring_size;
1834 2002
2003 /* A queue must always have room for at least one skb.
2004 * Therefore, stop the queue when the number of free entries
2005 * reaches the maximum number of descriptors per skb.
2006 */
2007 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2008 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2009
1835 txq->tx_desc_count = 0; 2010 txq->tx_desc_count = 0;
1836 txq->tx_curr_desc = 0; 2011 txq->tx_curr_desc = 0;
1837 txq->tx_used_desc = 0; 2012 txq->tx_used_desc = 0;
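The two thresholds above add hysteresis to queue flow control: the queue is stopped as soon as one more worst-case skb (a full-size TSO burst) might not fit, but only woken once the ring has drained to half that level, so it does not flap on every reclaimed descriptor. The two halves, as wired up elsewhere in this patch:

	/* xmit side: stop while a worst-case skb could still be refused */
	if (txq->tx_desc_count >= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	/* reclaim side: wake only after draining well below the stop point */
	if (netif_tx_queue_stopped(nq) &&
	    txq->tx_desc_count <= txq->tx_wake_threshold)
		netif_tx_wake_queue(nq);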
@@ -1871,6 +2046,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1871 nexti * sizeof(struct tx_desc); 2046 nexti * sizeof(struct tx_desc);
1872 } 2047 }
1873 2048
2049 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2050 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2051 txq->tx_ring_size * TSO_HEADER_SIZE,
2052 &txq->tso_hdrs_dma, GFP_KERNEL);
2053 if (txq->tso_hdrs == NULL) {
2054 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2055 txq->tx_desc_area, txq->tx_desc_dma);
2056 return -ENOMEM;
2057 }
1874 skb_queue_head_init(&txq->tx_skb); 2058 skb_queue_head_init(&txq->tx_skb);
1875 2059
1876 return 0; 2060 return 0;
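The coherent allocation above reserves one fixed TSO_HEADER_SIZE (128-byte) header slot per ring entry, indexed by the descriptor slot, so no per-packet mapping is needed; slot i is addressed consistently from both sides:

	char      *hdr     = txq->tso_hdrs     + i * TSO_HEADER_SIZE; /* CPU view */
	dma_addr_t hdr_dma = txq->tso_hdrs_dma + i * TSO_HEADER_SIZE; /* device view */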
@@ -1891,6 +2075,10 @@ static void txq_deinit(struct tx_queue *txq)
1891 else 2075 else
1892 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2076 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
1893 txq->tx_desc_area, txq->tx_desc_dma); 2077 txq->tx_desc_area, txq->tx_desc_dma);
2078 if (txq->tso_hdrs)
2079 dma_free_coherent(mp->dev->dev.parent,
2080 txq->tx_ring_size * TSO_HEADER_SIZE,
2081 txq->tso_hdrs, txq->tso_hdrs_dma);
1894} 2082}
1895 2083
1896 2084
@@ -2303,7 +2491,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2303 2491
2304 ret = phy_mii_ioctl(mp->phy, ifr, cmd); 2492 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2305 if (!ret) 2493 if (!ret)
2306 mv643xx_adjust_pscr(mp); 2494 mv643xx_eth_adjust_link(dev);
2307 return ret; 2495 return ret;
2308} 2496}
2309 2497
@@ -2678,6 +2866,7 @@ static void set_params(struct mv643xx_eth_private *mp,
2678 struct mv643xx_eth_platform_data *pd) 2866 struct mv643xx_eth_platform_data *pd)
2679{ 2867{
2680 struct net_device *dev = mp->dev; 2868 struct net_device *dev = mp->dev;
2869 unsigned int tx_ring_size;
2681 2870
2682 if (is_valid_ether_addr(pd->mac_addr)) 2871 if (is_valid_ether_addr(pd->mac_addr))
2683 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 2872 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2692,22 +2881,22 @@ static void set_params(struct mv643xx_eth_private *mp,
2692 2881
2693 mp->rxq_count = pd->rx_queue_count ? : 1; 2882 mp->rxq_count = pd->rx_queue_count ? : 1;
2694 2883
2695 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2884 tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2696 if (pd->tx_queue_size) 2885 if (pd->tx_queue_size)
2697 mp->tx_ring_size = pd->tx_queue_size; 2886 tx_ring_size = pd->tx_queue_size;
2887
2888 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2889 MV643XX_MAX_SKB_DESCS * 2, 4096);
2890 if (mp->tx_ring_size != tx_ring_size)
2891 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2892 mp->tx_ring_size, tx_ring_size);
2893
2698 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2894 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2699 mp->tx_desc_sram_size = pd->tx_sram_size; 2895 mp->tx_desc_sram_size = pd->tx_sram_size;
2700 2896
2701 mp->txq_count = pd->tx_queue_count ? : 1; 2897 mp->txq_count = pd->tx_queue_count ? : 1;
2702} 2898}
2703 2899
2704static void mv643xx_eth_adjust_link(struct net_device *dev)
2705{
2706 struct mv643xx_eth_private *mp = netdev_priv(dev);
2707
2708 mv643xx_adjust_pscr(mp);
2709}
2710
2711static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2900static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2712 int phy_addr) 2901 int phy_addr)
2713{ 2902{
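The clamp_t() above, applied identically in set_params() and set_ringparam(), guarantees the TX ring is at least twice the worst-case descriptor count of a single skb, so tx_stop_threshold can never underflow. Working through the constants in this patch, assuming the usual MAX_SKB_FRAGS of 17 for 4 KiB pages:

	MV643XX_MAX_SKB_DESCS = 100 * 2 + 17 = 217   /* TSO segs * 2 + frags */
	minimum tx ring       = 217 * 2     = 434    /* floor of the clamp  */
	tx_stop_threshold     = ring - 217  >= 217   /* always positive     */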
@@ -2889,7 +3078,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2889 if (err) 3078 if (err)
2890 goto out; 3079 goto out;
2891 3080
2892 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 3081 dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
2893 3082
2894 init_pscr(mp, pd->speed, pd->duplex); 3083 init_pscr(mp, pd->speed, pd->duplex);
2895 3084
@@ -2921,11 +3110,14 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2921 dev->watchdog_timeo = 2 * HZ; 3110 dev->watchdog_timeo = 2 * HZ;
2922 dev->base_addr = 0; 3111 dev->base_addr = 0;
2923 3112
2924 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 3113 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2925 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 3114 dev->vlan_features = dev->features;
2926 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 3115
3116 dev->features |= NETIF_F_RXCSUM;
3117 dev->hw_features = dev->features;
2927 3118
2928 dev->priv_flags |= IFF_UNICAST_FLT; 3119 dev->priv_flags |= IFF_UNICAST_FLT;
3120 dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
2929 3121
2930 SET_NETDEV_DEV(dev, &pdev->dev); 3122 SET_NETDEV_DEV(dev, &pdev->dev);
2931 3123
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9d5ced263a5e..fc2fb25343f4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
195 return -ENODEV; 195 return -ENODEV;
196 } 196 }
197 197
198 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev)); 198 bus = devm_mdiobus_alloc_size(&pdev->dev,
199 if (!bus) { 199 sizeof(struct orion_mdio_dev));
200 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n"); 200 if (!bus)
201 return -ENOMEM; 201 return -ENOMEM;
202 }
203 202
204 bus->name = "orion_mdio_bus"; 203 bus->name = "orion_mdio_bus";
205 bus->read = orion_mdio_read; 204 bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
208 dev_name(&pdev->dev)); 207 dev_name(&pdev->dev));
209 bus->parent = &pdev->dev; 208 bus->parent = &pdev->dev;
210 209
211 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 210 bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
212 if (!bus->irq) { 211 GFP_KERNEL);
213 mdiobus_free(bus); 212 if (!bus->irq)
214 return -ENOMEM; 213 return -ENOMEM;
215 }
216 214
217 for (i = 0; i < PHY_MAX_ADDR; i++) 215 for (i = 0; i < PHY_MAX_ADDR; i++)
218 bus->irq[i] = PHY_POLL; 216 bus->irq[i] = PHY_POLL;
@@ -264,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
264out_mdio: 262out_mdio:
265 if (!IS_ERR(dev->clk)) 263 if (!IS_ERR(dev->clk))
266 clk_disable_unprepare(dev->clk); 264 clk_disable_unprepare(dev->clk);
267 kfree(bus->irq);
268 mdiobus_free(bus);
269 return ret; 265 return ret;
270} 266}
271 267
@@ -276,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
276 272
277 writel(0, dev->regs + MVMDIO_ERR_INT_MASK); 273 writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
278 mdiobus_unregister(bus); 274 mdiobus_unregister(bus);
279 kfree(bus->irq);
280 mdiobus_free(bus);
281 if (!IS_ERR(dev->clk)) 275 if (!IS_ERR(dev->clk))
282 clk_disable_unprepare(dev->clk); 276 clk_disable_unprepare(dev->clk);
283 277
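The mvmdio changes above are a straight conversion to managed (devm) allocations: memory obtained with devm_mdiobus_alloc_size() and devm_kmalloc_array() is freed automatically when probe fails or the device is unbound, which is why the explicit kfree()/mdiobus_free() calls disappear from both the error paths and orion_mdio_remove(). The before/after shape, reduced to its essentials:

	/* before: every error path unwinds by hand */
	bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
	bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	...
	kfree(bus->irq);
	mdiobus_free(bus);

	/* after: the device core owns both lifetimes */
	bus = devm_mdiobus_alloc_size(&pdev->dev,
				      sizeof(struct orion_mdio_dev));
	bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
				      GFP_KERNEL);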
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 14786c8bf99e..45beca17fa50 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -23,6 +23,7 @@
23#include <net/ip.h> 23#include <net/ip.h>
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <net/tso.h>
26#include <linux/of.h> 27#include <linux/of.h>
27#include <linux/of_irq.h> 28#include <linux/of_irq.h>
28#include <linux/of_mdio.h> 29#include <linux/of_mdio.h>
@@ -218,9 +219,6 @@
218#define MVNETA_RX_COAL_PKTS 32 219#define MVNETA_RX_COAL_PKTS 32
219#define MVNETA_RX_COAL_USEC 100 220#define MVNETA_RX_COAL_USEC 100
220 221
221/* Napi polling weight */
222#define MVNETA_RX_POLL_WEIGHT 64
223
224/* The two bytes Marvell header. Either contains a special value used 222/* The two bytes Marvell header. Either contains a special value used
225 * by Marvell switches when a specific hardware mode is enabled (not 223 * by Marvell switches when a specific hardware mode is enabled (not
226 * supported by this driver) or is filled automatically by zeroes on 224 * supported by this driver) or is filled automatically by zeroes on
@@ -244,12 +242,20 @@
244 242
245#define MVNETA_TX_MTU_MAX 0x3ffff 243#define MVNETA_TX_MTU_MAX 0x3ffff
246 244
245/* TSO header size */
246#define TSO_HEADER_SIZE 128
247
247/* Max number of Rx descriptors */ 248/* Max number of Rx descriptors */
248#define MVNETA_MAX_RXD 128 249#define MVNETA_MAX_RXD 128
249 250
250/* Max number of Tx descriptors */ 251/* Max number of Tx descriptors */
251#define MVNETA_MAX_TXD 532 252#define MVNETA_MAX_TXD 532
252 253
254/* Max number of allowed TCP segments for software TSO */
255#define MVNETA_MAX_TSO_SEGS 100
256
257#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
258
253/* descriptor aligned size */ 259/* descriptor aligned size */
254#define MVNETA_DESC_ALIGNED_SIZE 32 260#define MVNETA_DESC_ALIGNED_SIZE 32
255 261
@@ -258,6 +264,10 @@
258 ETH_HLEN + ETH_FCS_LEN, \ 264 ETH_HLEN + ETH_FCS_LEN, \
259 MVNETA_CPU_D_CACHE_LINE_SIZE) 265 MVNETA_CPU_D_CACHE_LINE_SIZE)
260 266
267#define IS_TSO_HEADER(txq, addr) \
268 ((addr >= txq->tso_hdrs_phys) && \
269 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
270
261#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 271#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
262 272
263struct mvneta_pcpu_stats { 273struct mvneta_pcpu_stats {
@@ -279,9 +289,6 @@ struct mvneta_port {
279 u32 cause_rx_tx; 289 u32 cause_rx_tx;
280 struct napi_struct napi; 290 struct napi_struct napi;
281 291
282 /* Napi weight */
283 int weight;
284
285 /* Core clock */ 292 /* Core clock */
286 struct clk *clk; 293 struct clk *clk;
287 u8 mcast_count[256]; 294 u8 mcast_count[256];
@@ -390,6 +397,8 @@ struct mvneta_tx_queue {
390 * descriptor ring 397 * descriptor ring
391 */ 398 */
392 int count; 399 int count;
400 int tx_stop_threshold;
401 int tx_wake_threshold;
393 402
394 /* Array of transmitted skb */ 403 /* Array of transmitted skb */
395 struct sk_buff **tx_skb; 404 struct sk_buff **tx_skb;
@@ -413,6 +422,12 @@ struct mvneta_tx_queue {
413 422
414 /* Index of the next TX DMA descriptor to process */ 423 /* Index of the next TX DMA descriptor to process */
415 int next_desc_to_proc; 424 int next_desc_to_proc;
425
426 /* DMA buffers for TSO headers */
427 char *tso_hdrs;
428
429 /* DMA address of TSO headers */
430 dma_addr_t tso_hdrs_phys;
416}; 431};
417 432
418struct mvneta_rx_queue { 433struct mvneta_rx_queue {
@@ -441,7 +456,10 @@ struct mvneta_rx_queue {
441 int next_desc_to_proc; 456 int next_desc_to_proc;
442}; 457};
443 458
444static int rxq_number = 8; 459/* The hardware supports eight (8) rx queues, but we are only allowing
460 * the first one to be used. Therefore, let's just allocate one queue.
461 */
462static int rxq_number = 1;
445static int txq_number = 8; 463static int txq_number = 8;
446 464
447static int rxq_def; 465static int rxq_def;
@@ -1277,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1277 1295
1278 mvneta_txq_inc_get(txq); 1296 mvneta_txq_inc_get(txq);
1279 1297
1298 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1299 dma_unmap_single(pp->dev->dev.parent,
1300 tx_desc->buf_phys_addr,
1301 tx_desc->data_size, DMA_TO_DEVICE);
1280 if (!skb) 1302 if (!skb)
1281 continue; 1303 continue;
1282
1283 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1284 tx_desc->data_size, DMA_TO_DEVICE);
1285 dev_kfree_skb_any(skb); 1304 dev_kfree_skb_any(skb);
1286 } 1305 }
1287} 1306}
@@ -1302,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
1302 txq->count -= tx_done; 1321 txq->count -= tx_done;
1303 1322
1304 if (netif_tx_queue_stopped(nq)) { 1323 if (netif_tx_queue_stopped(nq)) {
1305 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1) 1324 if (txq->count <= txq->tx_wake_threshold)
1306 netif_tx_wake_queue(nq); 1325 netif_tx_wake_queue(nq);
1307 } 1326 }
1308} 1327}
@@ -1519,14 +1538,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 	return rx_done;
 }
 
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+	struct mvneta_tx_desc *tx_desc;
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+	txq->tx_skb[txq->txq_put_index] = NULL;
+	tx_desc = mvneta_txq_next_desc_get(txq);
+	tx_desc->data_size = hdr_len;
+	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+	tx_desc->command |= MVNETA_TXD_F_DESC;
+	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+				 txq->txq_put_index * TSO_HEADER_SIZE;
+	mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+		    struct sk_buff *skb, char *data, int size,
+		    bool last_tcp, bool is_last)
+{
+	struct mvneta_tx_desc *tx_desc;
+
+	tx_desc = mvneta_txq_next_desc_get(txq);
+	tx_desc->data_size = size;
+	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+						size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent,
+		     tx_desc->buf_phys_addr))) {
+		mvneta_txq_desc_put(txq);
+		return -ENOMEM;
+	}
+
+	tx_desc->command = 0;
+	txq->tx_skb[txq->txq_put_index] = NULL;
+
+	if (last_tcp) {
+		/* last descriptor in the TCP packet */
+		tx_desc->command = MVNETA_TXD_L_DESC;
+
+		/* last descriptor in SKB */
+		if (is_last)
+			txq->tx_skb[txq->txq_put_index] = skb;
+	}
+	mvneta_txq_inc_put(txq);
+	return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+			 struct mvneta_tx_queue *txq)
+{
+	int total_len, data_left;
+	int desc_count = 0;
+	struct mvneta_port *pp = netdev_priv(dev);
+	struct tso_t tso;
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int i;
+
+	/* Count needed descriptors */
+	if ((txq->count + tso_count_descs(skb)) >= txq->size)
+		return 0;
+
+	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+		pr_info("*** Is this even possible???!?!?\n");
+		return 0;
+	}
+
+	/* Initialize the TSO handler, and prepare the first payload */
+	tso_start(skb, &tso);
+
+	total_len = skb->len - hdr_len;
+	while (total_len > 0) {
+		char *hdr;
+
+		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+		total_len -= data_left;
+		desc_count++;
+
+		/* prepare packet headers: MAC + IP + TCP */
+		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+		mvneta_tso_put_hdr(skb, pp, txq);
+
+		while (data_left > 0) {
+			int size;
+			desc_count++;
+
+			size = min_t(int, tso.size, data_left);
+
+			if (mvneta_tso_put_data(dev, txq, skb,
+						tso.data, size,
+						size == data_left,
+						total_len == 0))
+				goto err_release;
+			data_left -= size;
+
+			tso_build_data(skb, &tso, size);
+		}
+	}
+
+	return desc_count;
+
+err_release:
+	/* Release all used data descriptors; header descriptors must not
+	 * be DMA-unmapped.
+	 */
+	for (i = desc_count - 1; i >= 0; i--) {
+		struct mvneta_tx_desc *tx_desc = txq->descs + i;
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+			dma_unmap_single(pp->dev->dev.parent,
+					 tx_desc->buf_phys_addr,
+					 tx_desc->data_size,
+					 DMA_TO_DEVICE);
+		mvneta_txq_desc_put(txq);
+	}
+	return 0;
+}
+
 /* Handle tx fragmentation processing */
 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 				  struct mvneta_tx_queue *txq)
 {
 	struct mvneta_tx_desc *tx_desc;
-	int i;
+	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		void *addr = page_address(frag->page.p) + frag->page_offset;
 
@@ -1543,20 +1682,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 			goto error;
 		}
 
-		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+		if (i == nr_frags - 1) {
 			/* Last descriptor */
 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-
 			txq->tx_skb[txq->txq_put_index] = skb;
-
-			mvneta_txq_inc_put(txq);
 		} else {
 			/* Descriptor in the middle: Not First, Not Last */
 			tx_desc->command = 0;
-
 			txq->tx_skb[txq->txq_put_index] = NULL;
-			mvneta_txq_inc_put(txq);
 		}
+		mvneta_txq_inc_put(txq);
 	}
 
 	return 0;
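mvneta_tx_tso() walks the skb exactly as the model below does: the payload is cut into gso_size segments, and each segment consumes one header descriptor (from the coherent header pool) plus one data descriptor per payload chunk. That is the same accounting tso_count_descs() performs, which the function checks against ring space up front. A stand-alone model with made-up sizes:

	#include <stdio.h>

	int main(void)
	{
		int payload = 7000, gso_size = 1448, frag = 4096; /* illustrative */
		int descs = 0;

		while (payload > 0) {
			int left = payload < gso_size ? payload : gso_size;

			payload -= left;
			descs++;		/* header descriptor */
			while (left > 0) {
				int chunk = left < frag ? left : frag;

				left -= chunk;
				descs++;	/* data descriptor */
			}
		}
		printf("segments need %d descriptors\n", descs);
		return 0;
	}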
@@ -1584,15 +1719,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 	u16 txq_id = skb_get_queue_mapping(skb);
 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
 	struct mvneta_tx_desc *tx_desc;
-	struct netdev_queue *nq;
 	int frags = 0;
 	u32 tx_cmd;
 
 	if (!netif_running(dev))
 		goto out;
 
+	if (skb_is_gso(skb)) {
+		frags = mvneta_tx_tso(skb, dev, txq);
+		goto out;
+	}
+
 	frags = skb_shinfo(skb)->nr_frags + 1;
-	nq = netdev_get_tx_queue(dev, txq_id);
 
 	/* Get a descriptor for the first part of the packet */
 	tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1773,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	txq->count += frags;
-	mvneta_txq_pend_desc_add(pp, txq, frags);
-
-	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
-		netif_tx_stop_queue(nq);
-
 out:
 	if (frags > 0) {
 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+		txq->count += frags;
+		mvneta_txq_pend_desc_add(pp, txq, frags);
+
+		if (txq->count >= txq->tx_stop_threshold)
+			netif_tx_stop_queue(nq);
 
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_packets++;
@@ -2003,7 +2142,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
 {
 	int queue;
 
-	/* free the skb's in the hal tx ring */
+	/* free the skb's in the tx ring */
 	for (queue = 0; queue < txq_number; queue++)
 		mvneta_txq_done_force(pp, &pp->txqs[queue]);
 
@@ -2081,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 {
 	txq->size = pp->tx_ring_size;
 
+	/* A queue must always have room for at least one skb.
+	 * Therefore, stop the queue when the free entries reaches
+	 * the maximum number of descriptors per skb.
+	 */
+	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
+
 	/* Allocate memory for TX descriptors */
 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2109,6 +2256,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 				  txq->descs, txq->descs_phys);
 		return -ENOMEM;
 	}
+
+	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+					   txq->size * TSO_HEADER_SIZE,
+					   &txq->tso_hdrs_phys, GFP_KERNEL);
+	if (txq->tso_hdrs == NULL) {
+		kfree(txq->tx_skb);
+		dma_free_coherent(pp->dev->dev.parent,
+				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
+				  txq->descs, txq->descs_phys);
+		return -ENOMEM;
+	}
 	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
 	return 0;
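The header pool allocated here reserves one TSO_HEADER_SIZE slot per ring entry, so a header's DMA address can be derived from the put index with simple arithmetic, exactly as mvneta_tso_put_hdr() does above. Illustrative stand-alone arithmetic:

	#include <stdint.h>

	#define HDR_SZ 128	/* stand-in for TSO_HEADER_SIZE */

	/* DMA address of the header slot belonging to ring entry put_index */
	static uint64_t hdr_slot_addr(uint64_t pool_base, int put_index)
	{
		return pool_base + (uint64_t)put_index * HDR_SZ;
	}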
@@ -2120,6 +2279,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 {
 	kfree(txq->tx_skb);
 
+	if (txq->tso_hdrs)
+		dma_free_coherent(pp->dev->dev.parent,
+				  txq->size * TSO_HEADER_SIZE,
+				  txq->tso_hdrs, txq->tso_hdrs_phys);
 	if (txq->descs)
 		dma_free_coherent(pp->dev->dev.parent,
 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2442,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 		return 0;
 
 	/* The interface is running, so we have to force a
-	 * reallocation of the RXQs
+	 * reallocation of the queues
 	 */
 	mvneta_stop_dev(pp);
 
 	mvneta_cleanup_txqs(pp);
 	mvneta_cleanup_rxqs(pp);
 
-	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret) {
-		netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+		netdev_err(dev, "unable to setup rxqs after MTU change\n");
 		return ret;
 	}
 
-	mvneta_setup_txqs(pp);
+	ret = mvneta_setup_txqs(pp);
+	if (ret) {
+		netdev_err(dev, "unable to setup txqs after MTU change\n");
+		return ret;
+	}
 
 	mvneta_start_dev(pp);
 	mvneta_port_up(pp);
@@ -2323,22 +2490,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
-	u8 *mac = addr + 2;
-	int i;
-
-	if (netif_running(dev))
-		return -EBUSY;
+	struct sockaddr *sockaddr = addr;
+	int ret;
 
+	ret = eth_prepare_mac_addr_change(dev, addr);
+	if (ret < 0)
+		return ret;
 	/* Remove previous address table entry */
 	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
 
 	/* Set new addr in hw */
-	mvneta_mac_addr_set(pp, mac, rxq_def);
-
-	/* Set addr in the device */
-	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[i] = mac[i];
+	mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
 
+	eth_commit_mac_addr_change(dev, addr);
 	return 0;
 }
 
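The rewrite delegates validation and the dev_addr update to the core eth_prepare_mac_addr_change()/eth_commit_mac_addr_change() pair (both real kernel helpers), which also lifts the old netif_running() restriction. A hedged sketch of that pattern as a driver would use it; sketch_program_hw_mac() is a hypothetical placeholder for the hardware write:

	static int sketch_set_mac(struct net_device *dev, void *addr)
	{
		struct sockaddr *sa = addr;
		int ret;

		ret = eth_prepare_mac_addr_change(dev, addr);
		if (ret < 0)
			return ret;			/* e.g. invalid address */
		sketch_program_hw_mac(dev, sa->sa_data);	/* hypothetical hook */
		eth_commit_mac_addr_change(dev, addr);	/* copies into dev->dev_addr */
		return 0;
	}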
@@ -2433,8 +2597,6 @@ static int mvneta_open(struct net_device *dev)
 	struct mvneta_port *pp = netdev_priv(dev);
 	int ret;
 
-	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
-
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2600,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
 		return -EINVAL;
 	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
 		ring->rx_pending : MVNETA_MAX_RXD;
-	pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
-		ring->tx_pending : MVNETA_MAX_TXD;
+
+	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+	if (pp->tx_ring_size != ring->tx_pending)
+		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+			    pp->tx_ring_size, ring->tx_pending);
 
 	if (netif_running(dev)) {
 		mvneta_stop(dev);
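clamp_t() pins the requested TX ring size between a floor that can always hold at least two worst-case skbs and the hardware maximum, warning when the request had to be adjusted. A stand-alone model of the clamp with illustrative bounds:

	#include <stdio.h>

	static unsigned int clamp_model(unsigned int val, unsigned int lo,
					unsigned int hi)
	{
		return val < lo ? lo : (val > hi ? hi : val);
	}

	int main(void)
	{
		unsigned int floor = 340, max = 8192;	/* illustrative bounds */
		unsigned int req = 64;
		unsigned int got = clamp_model(req, floor, max);

		if (got != req)
			printf("TX queue size set to %u (requested %u)\n", got, req);
		return 0;
	}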
@@ -2638,7 +2804,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
 };
 
 /* Initialize hw */
-static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
 {
 	int queue;
 
@@ -2648,8 +2814,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
 	/* Set port default values */
 	mvneta_defaults_set(pp);
 
-	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
-			   GFP_KERNEL);
+	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
+				GFP_KERNEL);
 	if (!pp->txqs)
 		return -ENOMEM;
 
@@ -2661,12 +2827,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
 		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
 	}
 
-	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
-			   GFP_KERNEL);
-	if (!pp->rxqs) {
-		kfree(pp->txqs);
+	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
+				GFP_KERNEL);
+	if (!pp->rxqs)
 		return -ENOMEM;
-	}
 
 	/* Create Rx descriptor rings */
 	for (queue = 0; queue < rxq_number; queue++) {
@@ -2680,12 +2844,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
 	return 0;
 }
 
-static void mvneta_deinit(struct mvneta_port *pp)
-{
-	kfree(pp->txqs);
-	kfree(pp->rxqs);
-}
-
 /* platform glue : initialize decoding windows */
 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 				     const struct mbus_dram_target_info *dram)
@@ -2768,7 +2926,6 @@ static int mvneta_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct device_node *dn = pdev->dev.of_node;
 	struct device_node *phy_node;
-	u32 phy_addr;
 	struct mvneta_port *pp;
 	struct net_device *dev;
 	const char *dt_mac_addr;
@@ -2797,9 +2954,22 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	phy_node = of_parse_phandle(dn, "phy", 0);
 	if (!phy_node) {
-		dev_err(&pdev->dev, "no associated PHY\n");
-		err = -ENODEV;
-		goto err_free_irq;
+		if (!of_phy_is_fixed_link(dn)) {
+			dev_err(&pdev->dev, "no PHY specified\n");
+			err = -ENODEV;
+			goto err_free_irq;
+		}
+
+		err = of_phy_register_fixed_link(dn);
+		if (err < 0) {
+			dev_err(&pdev->dev, "cannot register fixed PHY\n");
+			goto err_free_irq;
+		}
+
+		/* In the case of a fixed PHY, the DT node associated
+		 * to the PHY is the Ethernet MAC DT node.
+		 */
+		phy_node = dn;
 	}
 
 	phy_mode = of_get_phy_mode(dn);
@@ -2813,11 +2983,9 @@ static int mvneta_probe(struct platform_device *pdev)
 	dev->watchdog_timeo = 5 * HZ;
 	dev->netdev_ops = &mvneta_netdev_ops;
 
-	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+	dev->ethtool_ops = &mvneta_eth_tool_ops;
 
 	pp = netdev_priv(dev);
-
-	pp->weight = MVNETA_RX_POLL_WEIGHT;
 	pp->phy_node = phy_node;
 	pp->phy_interface = phy_mode;
 
@@ -2864,33 +3032,32 @@ static int mvneta_probe(struct platform_device *pdev)
 	pp->dev = dev;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	err = mvneta_init(pp, phy_addr);
-	if (err < 0) {
-		dev_err(&pdev->dev, "can't init eth hal\n");
+	err = mvneta_init(&pdev->dev, pp);
+	if (err < 0)
 		goto err_free_stats;
-	}
 
 	err = mvneta_port_power_up(pp, phy_mode);
 	if (err < 0) {
 		dev_err(&pdev->dev, "can't power up port\n");
-		goto err_deinit;
+		goto err_free_stats;
 	}
 
 	dram_target_info = mv_mbus_dram_info();
 	if (dram_target_info)
 		mvneta_conf_mbus_windows(pp, dram_target_info);
 
-	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+	netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
 
-	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
-	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-	dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+	dev->hw_features |= dev->features;
+	dev->vlan_features |= dev->features;
 	dev->priv_flags |= IFF_UNICAST_FLT;
+	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
 
 	err = register_netdev(dev);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register\n");
-		goto err_deinit;
+		goto err_free_stats;
 	}
 
 	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -2900,8 +3067,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	return 0;
 
-err_deinit:
-	mvneta_deinit(pp);
 err_free_stats:
 	free_percpu(pp->stats);
 err_clk:
@@ -2920,7 +3085,6 @@ static int mvneta_remove(struct platform_device *pdev)
 	struct mvneta_port *pp = netdev_priv(dev);
 
 	unregister_netdev(dev);
-	mvneta_deinit(pp);
 	clk_disable_unprepare(pp->clk);
 	free_percpu(pp->stats);
 	irq_dispose_mapping(dev->irq);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index b358c2f6f4bd..8f5aa7c62b18 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	dev->netdev_ops = &pxa168_eth_netdev_ops;
 	dev->watchdog_timeo = 2 * HZ;
 	dev->base_addr = 0;
-	SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+	dev->ethtool_ops = &pxa168_ethtool_ops;
 
 	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
 
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b81106451a0a..69693384b58c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
 
 	SET_NETDEV_DEV(dev, &hw->pdev->dev);
 	dev->irq = hw->pdev->irq;
-	SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+	dev->ethtool_ops = &sky2_ethtool_ops;
 	dev->watchdog_timeo = TX_WATCHDOG;
 	dev->netdev_ops = &sky2_netdev_ops[port];
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 29b616990e52..5d940a26055c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 
 	/* First, verify that the master reports correct status */
 	if (comm_pending(dev)) {
-		mlx4_warn(dev, "Communication channel is not idle."
-			  "my toggle is %d (cmd:0x%x)\n",
+		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
 			  priv->cmd.comm_toggle, cmd);
 		return -EAGAIN;
 	}
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 				*out_param =
 					be64_to_cpu(vhcr->out_param);
 			else {
-				mlx4_err(dev, "response expected while"
-					 "output mailbox is NULL for "
-					 "command 0x%x\n", op);
+				mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+					 op);
 				vhcr->status = CMD_STAT_BAD_PARAM;
 			}
 		}
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 				*out_param =
 					be64_to_cpu(vhcr->out_param);
 			else {
-				mlx4_err(dev, "response expected while"
-					 "output mailbox is NULL for "
-					 "command 0x%x\n", op);
+				mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+					 op);
 				vhcr->status = CMD_STAT_BAD_PARAM;
 			}
 		}
 		ret = mlx4_status_to_errno(vhcr->status);
 	} else
-		mlx4_err(dev, "failed execution of VHCR_POST command"
-			 "opcode 0x%x\n", op);
+		mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+			 op);
 	}
 
 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 		goto out;
 	}
 
+	if (out_is_imm && !out_param) {
+		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+			 op);
+		err = -EINVAL;
+		goto out;
+	}
+
 	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
 	if (err)
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	cmd->free_head = context->next;
 	spin_unlock(&cmd->context_lock);
 
+	if (out_is_imm && !out_param) {
+		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+			 op);
+		err = -EINVAL;
+		goto out;
+	}
+
 	init_completion(&context->done);
 
 	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
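Both command paths gain the same defensive check: a command whose result comes back as an immediate value (out_is_imm) must be given somewhere to store it, and rejecting a NULL out_param up front turns what would later be a NULL dereference into a clean -EINVAL. A stand-alone model of the guard, with illustrative names:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>

	static int check_imm_out(bool out_is_imm, const uint64_t *out_param)
	{
		if (out_is_imm && !out_param)
			return -EINVAL;	/* caller forgot the result buffer */
		return 0;
	}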
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
 
 	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
 	    (slave & ~0x7f) | (size & 0xff)) {
-		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
-			      "master_addr:0x%llx slave_id:%d size:%d\n",
-			      slave_addr, master_addr, slave, size);
+		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+			 slave_addr, master_addr, slave, size);
 		return -EINVAL;
 	}
 
@@ -1422,8 +1432,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 			    ALIGN(sizeof(struct mlx4_vhcr_cmd),
 				  MLX4_ACCESS_MEM_ALIGN), 1);
 	if (ret) {
-		mlx4_err(dev, "%s:Failed reading vhcr"
-			 "ret: 0x%x\n", __func__, ret);
+		mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+			 __func__, ret);
 		kfree(vhcr);
 		return ret;
 	}
@@ -1474,9 +1484,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 
 	/* Apply permission and bound checks if applicable */
 	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
-		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
-			  "checks for resource_id:%d\n", vhcr->op, slave,
-			  vhcr->in_modifier);
+		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+			  vhcr->op, slave, vhcr->in_modifier);
 		vhcr_cmd->status = CMD_STAT_BAD_OP;
 		goto out_status;
 	}
@@ -1515,8 +1524,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 	}
 
 	if (err) {
-		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
-			  " error:%d, status %d\n",
+		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
 			  vhcr->op, slave, vhcr->errno, err);
 		vhcr_cmd->status = mlx4_errno_to_status(err);
 		goto out_status;
@@ -1550,8 +1558,8 @@ out_status:
 			  __func__);
 	else if (vhcr->e_bit &&
 		 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
-		mlx4_warn(dev, "Failed to generate command completion "
-			  "eqe for slave %d\n", slave);
+		mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+			  slave);
 }
 
 out:
@@ -1590,8 +1598,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 
 	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
 		 slave, port);
-	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
-		 vp_admin->default_qos, vp_admin->link_state);
+	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+		 vp_admin->default_vlan, vp_admin->default_qos,
+		 vp_admin->link_state);
 
 	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (!work)
@@ -1604,7 +1613,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 					   &admin_vlan_ix);
 		if (err) {
 			kfree(work);
-			mlx4_warn((&priv->dev),
+			mlx4_warn(&priv->dev,
 				  "No vlan resources slave %d, port %d\n",
 				  slave, port);
 			return err;
@@ -1613,7 +1622,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 			admin_vlan_ix = NO_INDX;
 		}
 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
-		mlx4_dbg((&(priv->dev)),
+		mlx4_dbg(&priv->dev,
 			 "alloc vlan %d idx %d slave %d port %d\n",
 			 (int)(vp_admin->default_vlan),
 			 admin_vlan_ix, slave, port);
@@ -1676,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 				vp_admin->default_vlan, &(vp_oper->vlan_idx));
 			if (err) {
 				vp_oper->vlan_idx = NO_INDX;
-				mlx4_warn((&priv->dev),
+				mlx4_warn(&priv->dev,
 					  "No vlan resorces slave %d, port %d\n",
 					  slave, port);
 				return err;
 			}
-			mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
+			mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
 				 (int)(vp_oper->state.default_vlan),
 				 vp_oper->vlan_idx, slave, port);
 		}
@@ -1692,12 +1701,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 			if (0 > vp_oper->mac_idx) {
 				err = vp_oper->mac_idx;
 				vp_oper->mac_idx = NO_INDX;
-				mlx4_warn((&priv->dev),
+				mlx4_warn(&priv->dev,
 					  "No mac resorces slave %d, port %d\n",
 					  slave, port);
 				return err;
 			}
-			mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
+			mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
 				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
 		}
 	}
@@ -1748,8 +1757,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 			slave_state[slave].comm_toggle ^= 1;
 		reply = (u32) slave_state[slave].comm_toggle << 31;
 		if (toggle != slave_state[slave].comm_toggle) {
-			mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
-				  "STATE COMPROMISIED ***\n", toggle, slave);
+			mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+				  toggle, slave);
 			goto reset_slave;
 		}
 		if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1776,8 +1785,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 	/*command from slave in the middle of FLR*/
 	if (cmd != MLX4_COMM_CMD_RESET &&
 	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
-		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
-			  "in the middle of FLR\n", slave, cmd);
+		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
+			  slave, cmd);
 		return;
 	}
 
@@ -1815,8 +1824,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 
 		mutex_lock(&priv->cmd.slave_cmd_mutex);
 		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
-			mlx4_err(dev, "Failed processing vhcr for slave:%d,"
-				 " resetting slave.\n", slave);
+			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+				 slave);
 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
 			goto reset_slave;
 		}
@@ -1833,8 +1842,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 		is_going_down = 1;
 	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
 	if (is_going_down) {
-		mlx4_warn(dev, "Slave is going down aborting command(%d)"
-			  " executing from slave:%d\n",
+		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
 			  cmd, slave);
 		return;
 	}
@@ -1897,10 +1905,9 @@ void mlx4_master_comm_channel(struct work_struct *work)
 		if (toggle != slt) {
 			if (master->slave_state[slave].comm_toggle
 			    != slt) {
-				printk(KERN_INFO "slave %d out of sync."
-				       " read toggle %d, state toggle %d. "
-				       "Resynching.\n", slave, slt,
-				       master->slave_state[slave].comm_toggle);
+				pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+					slave, slt,
+					master->slave_state[slave].comm_toggle);
 				master->slave_state[slave].comm_toggle =
 					slt;
 			}
@@ -1913,8 +1920,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
 	}
 
 	if (reported && reported != served)
-		mlx4_warn(dev, "Got command event with bitmask from %d slaves"
-			 " but %d were served\n",
+		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
 			  reported, served);
 
 	if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1970,7 +1976,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 			ioremap(pci_resource_start(dev->pdev, 2) +
 				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
 		if (!priv->mfunc.comm) {
-			mlx4_err(dev, "Couldn't map communication vector.\n");
+			mlx4_err(dev, "Couldn't map communication vector\n");
 			goto err_vhcr;
 		}
 
@@ -2097,7 +2103,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
 					MLX4_HCR_BASE, MLX4_HCR_SIZE);
 		if (!priv->cmd.hcr) {
-			mlx4_err(dev, "Couldn't map command register.\n");
+			mlx4_err(dev, "Couldn't map command register\n");
 			return -ENOMEM;
 		}
 	}
@@ -2498,11 +2504,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
 	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
 	ivf->mac[5] = ((s_info->mac)  & 0xff);
 
 	ivf->vlan = s_info->default_vlan;
 	ivf->qos = s_info->default_qos;
-	ivf->tx_rate = s_info->tx_rate;
-	ivf->spoofchk = s_info->spoofchk;
-	ivf->linkstate = s_info->link_state;
+	ivf->max_tx_rate = s_info->tx_rate;
+	ivf->min_tx_rate = 0;
+	ivf->spoofchk = s_info->spoofchk;
+	ivf->linkstate = s_info->link_state;
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index c90cde5b4aee..80f725228f5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	atomic_set(&cq->refcount, 1);
 	init_completion(&cq->free);
 
+	cq->irq = priv->eq_table.eq[cq->vector].irq;
+	cq->irq_affinity_change = false;
+
 	return 0;
 
 err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d31bcad..4b2130760eed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 					&cq->vector)) {
 			cq->vector = (cq->ring + 1 + priv->port)
 			    % mdev->dev->caps.num_comp_vectors;
-			mlx4_warn(mdev, "Failed Assigning an EQ to "
-				  "%s ,Falling back to legacy EQ's\n",
+			mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
 				  name);
 		}
 	}
@@ -164,6 +163,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
 			       NAPI_POLL_WEIGHT);
 	} else {
+		struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
+
+		err = irq_set_affinity_hint(cq->mcq.irq,
+					    ring->affinity_mask);
+		if (err)
+			mlx4_warn(mdev, "Failed setting affinity hint\n");
+
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_hash_add(&cq->napi);
 	}
@@ -180,8 +186,11 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	if (priv->mdev->dev->caps.comp_pool && cq->vector)
+	if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+		if (!cq->is_tx)
+			irq_set_affinity_hint(cq->mcq.irq, NULL);
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
+	}
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d33605fe7..fa1a069e14e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -378,8 +378,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
 		cmd->duplex = DUPLEX_FULL;
 	} else {
-		ethtool_cmd_speed_set(cmd, -1);
-		cmd->duplex = -1;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	if (trans_type > 0 && trans_type <= 0xC) {
@@ -564,7 +564,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
 	return priv->rx_ring_num;
 }
 
-static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
+static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -582,8 +582,8 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
 	return err;
 }
 
-static int mlx4_en_set_rxfh_indir(struct net_device *dev,
-		const u32 *ring_index)
+static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+			    const u8 *key)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
 	} else {
 		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
-			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
 				cmd->fs.ring_cookie);
 			return -EINVAL;
 		}
 		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
 		if (!qpn) {
-			en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
 				cmd->fs.ring_cookie);
 			return -EINVAL;
 		}
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 	}
 	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
 	if (err) {
-		en_err(priv, "Fail to attach network rule at location %d.\n",
+		en_err(priv, "Fail to attach network rule at location %d\n",
 			cmd->fs.location);
 		goto out_free_list;
 	}
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int port_up;
+	int port_up = 0;
 	int err = 0;
 
 	if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-	mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+	if (dev->num_tc)
+		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
 
 	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
 	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
@@ -1223,8 +1224,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_rxnfc = mlx4_en_get_rxnfc,
 	.set_rxnfc = mlx4_en_set_rxnfc,
 	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
-	.get_rxfh_indir = mlx4_en_get_rxfh_indir,
-	.set_rxfh_indir = mlx4_en_set_rxfh_indir,
+	.get_rxfh = mlx4_en_get_rxfh,
+	.set_rxfh = mlx4_en_set_rxfh,
 	.get_channels = mlx4_en_get_channels,
 	.set_channels = mlx4_en_set_channels,
 	.get_ts_info = mlx4_en_get_ts_info,
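This rename tracks the ethtool API rework from the same series (the "RSS key via ethtool" item in the pull description): the old get_rxfh_indir/set_rxfh_indir hooks carried only the indirection table, while the replacement get_rxfh/set_rxfh pair (this kernel's signatures) also passes the RSS hash key, so one callback pair serves both ethtool -x queries. mlx4_en ignores the key argument here; a hedged sketch of the shape such a callback takes, not standalone code:

	static int sketch_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
	{
		/* fill indir[0 .. get_rxfh_indir_size()-1] with ring indices;
		 * a driver with a configurable key would also copy it into
		 * 'key' here. mlx4_en leaves the key untouched.
		 */
		return 0;
	}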
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4fe7e3a..f953c1d7eae6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 					MLX4_EN_MAX_TX_RING_P_UP);
 	if (params->udp_rss && !(mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
-		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+		mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
 		params->udp_rss = 0;
 	}
 	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 
 	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
 	if (!mdev->LSO_support)
-		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
-			  "FW version to enable LSO\n");
+		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
 
 	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
 			  MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	/* Build device profile according to supplied module parameters */
 	err = mlx4_en_get_profile(mdev);
 	if (err) {
-		mlx4_err(mdev, "Bad module parameters, aborting.\n");
+		mlx4_err(mdev, "Bad module parameters, aborting\n");
 		goto err_mr;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b1720c3d1..7d4fb7bf2593 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
 	case IPPROTO_TCP:
 		return MLX4_NET_TRANS_RULE_ID_TCP;
 	default:
-		return -EPROTONOSUPPORT;
+		return MLX4_NET_TRANS_RULE_NUM;
 	}
 };
 
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	int rc;
 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
-	if (spec_tcp_udp.id < 0) {
+	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
 		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
 			filter->ip_proto);
 		goto ignore;
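The fix matters because the lookup's return type is an enum: a negative errno stuffed into an enum silently becomes a bogus enumerator (and the `< 0` test may be optimized away if the enum is unsigned). Returning the enum's count as an explicit "unsupported" sentinel keeps the error check type-safe. A stand-alone model with illustrative names:

	#include <stdbool.h>

	enum rule_id { RULE_IPV4, RULE_TCP, RULE_UDP, RULE_NUM };

	static enum rule_id proto_to_rule(int ip_proto)
	{
		switch (ip_proto) {
		case 6:  return RULE_TCP;	/* IPPROTO_TCP */
		case 17: return RULE_UDP;	/* IPPROTO_UDP */
		default: return RULE_NUM;	/* unsupported sentinel */
		}
	}

	static bool rule_supported(enum rule_id id)
	{
		return id < RULE_NUM;
	}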
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
 				  priv->dev->dev_addr, priv->prev_mac);
 		if (err)
 			en_err(priv, "Failed changing HW MAC address\n");
-		memcpy(priv->prev_mac, priv->dev->dev_addr,
-		       sizeof(priv->prev_mac));
 	} else
 		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
 
+	memcpy(priv->prev_mac, priv->dev->dev_addr,
+	       sizeof(priv->prev_mac));
+
 	return err;
 }
 
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
 	if (!is_valid_ether_addr(saddr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-
 	mutex_lock(&mdev->state_lock);
+	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
 	err = mlx4_en_do_set_mac(priv);
 	mutex_unlock(&mdev->state_lock);
 
@@ -1526,6 +1526,27 @@ static void mlx4_en_linkstate(struct work_struct *work)
 	mutex_unlock(&mdev->state_lock);
 }
 
+static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
+	int numa_node = priv->mdev->dev->numa_node;
+	int ret = 0;
+
+	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
+					  ring->affinity_mask);
+	if (ret)
+		free_cpumask_var(ring->affinity_mask);
+
+	return ret;
+}
+
+static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+}
 
 int mlx4_en_start_port(struct net_device *dev)
 {
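zalloc_cpumask_var() and free_cpumask_var() must stay paired because with CONFIG_CPUMASK_OFFSTACK the cpumask_var_t is a real heap allocation; cpumask_set_cpu_local_first() (an API present in this kernel) then selects the ring_idx-th CPU, preferring CPUs on the device's NUMA node before spilling to remote ones. A stand-alone model of that "i-th local CPU first" selection, assuming a simplified contiguous two-node layout:

	#include <stdio.h>

	/* Model: cpus_per_node consecutive CPUs per node (illustrative layout).
	 * Ring i prefers the i-th CPU of the device's node and wraps into
	 * remote nodes once the local CPUs are exhausted.
	 */
	static int local_cpu_first(int i, int node, int cpus_per_node, int nr_nodes)
	{
		int total = cpus_per_node * nr_nodes;

		return (node * cpus_per_node + i) % total;
	}

	int main(void)
	{
		for (int i = 0; i < 8; i++)
			printf("ring %d -> cpu %d\n", i, local_cpu_first(i, 1, 4, 2));
		return 0;
	}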
@@ -1567,17 +1588,25 @@ int mlx4_en_start_port(struct net_device *dev)
 
 		mlx4_en_cq_init_lock(cq);
 
+		err = mlx4_en_init_affinity_hint(priv, i);
+		if (err) {
+			en_err(priv, "Failed preparing IRQ affinity hint\n");
+			goto cq_err;
+		}
+
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed activating Rx CQ\n");
+			mlx4_en_free_affinity_hint(priv, i);
 			goto cq_err;
 		}
 		for (j = 0; j < cq->size; j++)
 			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			en_err(priv, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters\n");
 			mlx4_en_deactivate_cq(priv, cq);
+			mlx4_en_free_affinity_hint(priv, i);
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
@@ -1615,7 +1644,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		}
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			en_err(priv, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters\n");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
@@ -1715,8 +1744,10 @@ rss_err:
 mac_err:
 	mlx4_en_put_qp(priv);
 cq_err:
-	while (rx_index--)
+	while (rx_index--) {
 		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
+		mlx4_en_free_affinity_hint(priv, i);
+	}
 	for (i = 0; i < priv->rx_ring_num; i++)
 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
@@ -1847,6 +1878,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 		msleep(1);
 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
+
+		mlx4_en_free_affinity_hint(priv, i);
 	}
 }
 
@@ -2539,7 +2572,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+	dev->ethtool_ops = &mlx4_en_ethtool_ops;
 
 	/*
 	 * Set driver features
@@ -2594,8 +2627,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 				    prof->tx_pause, prof->tx_ppp,
 				    prof->rx_pause, prof->rx_ppp);
 	if (err) {
-		en_err(priv, "Failed setting port general configurations "
-		       "for port %d, with error %d\n", priv->port, err);
+		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+		       priv->port, err);
 		goto out;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 87857a6463eb..d2d415732d99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 					  ring->actual_size,
 					  GFP_KERNEL)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-					en_err(priv, "Failed to allocate "
-						     "enough rx buffers\n");
+					en_err(priv, "Failed to allocate enough rx buffers\n");
 					return -ENOMEM;
 				} else {
 					new_size = rounddown_pow_of_two(ring->actual_size);
-					en_warn(priv, "Only %d buffers allocated "
-						      "reducing ring size to %d",
+					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
 						ring->actual_size, new_size);
 					goto reduce_rings;
 				}
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* Drop packet on bad receive or bad checksum */
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 						MLX4_CQE_OPCODE_ERROR)) {
-			en_err(priv, "CQE completed in error - vendor "
-				  "syndrom:%d syndrom:%d\n",
-				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
-				  ((struct mlx4_err_cqe *) cqe)->syndrome);
+			en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
+			       ((struct mlx4_err_cqe *)cqe)->syndrome);
 			goto next;
 		}
 		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 	mlx4_en_cq_unlock_napi(cq);
 
 	/* If we used up all the quota - we're probably not done yet... */
-	if (done == budget)
+	if (done == budget) {
 		INC_PERF_COUNTER(priv->pstats.napi_quota);
-	else {
+		if (unlikely(cq->mcq.irq_affinity_change)) {
+			cq->mcq.irq_affinity_change = false;
+			napi_complete(napi);
+			mlx4_en_arm_cq(priv, cq);
+			return 0;
+		}
+	} else {
 		/* Done for now */
+		cq->mcq.irq_affinity_change = false;
 		napi_complete(napi);
 		mlx4_en_arm_cq(priv, cq);
 	}
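The early exit added above (and its twin in the TX poll below) serves the affinity rework: even when the full budget was consumed, a pending IRQ affinity change makes the handler complete NAPI and re-arm the CQ, so polling stops on the old CPU and the next interrupt restarts it on the newly chosen one. A stand-alone model of the return-value logic:

	#include <stdbool.h>

	static int poll_budget_model(int done, int budget, bool *affinity_change)
	{
		if (done == budget && !*affinity_change)
			return budget;	/* stay in polling mode on this CPU */

		*affinity_change = false;
		/* driver: napi_complete(napi); mlx4_en_arm_cq(priv, cq); */
		return done < budget ? done : 0;
	}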
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	priv->rx_skb_size = eff_mtu;
 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
 
-	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
-		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
+	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
+	       eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
 		en_err(priv,
 		       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index bc0cc1eb214d..8be7483f8236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
 	ring->buf = ring->wqres.buf.direct.buf;
 
-	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
-	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
-	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
+	       ring, ring->buf, ring->size, ring->buf_size,
+	       (unsigned long long) ring->wqres.buf.direct.map);
 
 	ring->qpn = qpn;
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
 	if (err) {
-		en_dbg(DRV, priv, "working without blueflame (%d)", err);
+		en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
 		ring->bf.uar = &mdev->priv_uar;
 		ring->bf.uar->map = mdev->uar_map;
 		ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
 	/* If we used up all the quota - we're probably not done yet... */
 	if (done < budget) {
 		/* Done for now */
+		cq->mcq.irq_affinity_change = false;
 		napi_complete(napi);
 		mlx4_en_arm_cq(priv, cq);
 		return done;
+	} else if (unlikely(cq->mcq.irq_affinity_change)) {
+		cq->mcq.irq_affinity_change = false;
+		napi_complete(napi);
+		mlx4_en_arm_cq(priv, cq);
+		return 0;
 	}
 	return budget;
 }
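The en_rx.c and en_tx.c poll changes above share one pattern: when the IRQ affinity notifier has flagged the CQ (cq->mcq.irq_affinity_change), the NAPI poll routine completes and re-arms the CQ even though it exhausted its budget, so the next interrupt - and the NAPI context with it - can migrate to the newly selected CPU. A condensed sketch of that control flow (illustrative only, not a drop-in for the driver functions; the names are taken from the hunks above):

	/* returning less than budget after napi_complete() ends polling */
	if (done == budget && !cq->mcq.irq_affinity_change)
		return budget;			/* stay on this CPU, keep polling */

	cq->mcq.irq_affinity_change = false;	/* consume the notification */
	napi_complete(napi);			/* leave polling mode */
	mlx4_en_arm_cq(priv, cq);		/* next CQE raises an IRQ on the new CPU */
	return done < budget ? done : 0;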
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b0fb79..d954ec1eac17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,6 +53,11 @@ enum {
 	MLX4_EQ_ENTRY_SIZE	= 0x20
 };
 
+struct mlx4_irq_notify {
+	void *arg;
+	struct irq_affinity_notify notify;
+};
+
 #define MLX4_EQ_STATUS_OK	   ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW	   ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
 				if (i != dev->caps.function &&
 				    master->slave_state[i].active)
 					if (mlx4_GEN_EQE(dev, i, eqe))
-						mlx4_warn(dev, "Failed to "
-							  " generate event "
-							  "for slave %d\n", i);
+						mlx4_warn(dev, "Failed to generate event for slave %d\n",
+							  i);
 			}
 		} else {
 			if (mlx4_GEN_EQE(dev, slave, eqe))
-				mlx4_warn(dev, "Failed to generate event "
-					  "for slave %d\n", slave);
+				mlx4_warn(dev, "Failed to generate event for slave %d\n",
+					  slave);
 		}
 		++slave_eq->cons;
 	}
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
 	if ((!!(s_eqe->owner & 0x80)) ^
 	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
-		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
-			  "No free EQE on slave events queue\n", slave);
+		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
+			  slave);
 		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
 		return;
 	}
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 		}
 		break;
 	default:
-		pr_err("%s: BUG!!! UNKNOWN state: "
-		       "slave:%d, port:%d\n", __func__, slave, port);
+		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
+		       __func__, slave, port);
 		goto out;
 	}
 	ret = mlx4_get_slave_port_state(dev, slave, port);
 
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
 	for (i = 0 ; i < dev->num_slaves; i++) {
 
 		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
-			mlx4_dbg(dev, "mlx4_handle_slave_flr: "
-				 "clean slave: %d\n", i);
+			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
+				 i);
 
 			mlx4_delete_all_resources_for_slave(dev, i);
 			/*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
 			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
 				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 			if (err)
-				mlx4_warn(dev, "Failed to notify FW on "
-					  "FLR done (slave:%d)\n", i);
+				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
+					  i);
 		}
 	}
 }
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 					be32_to_cpu(eqe->event.qp.qpn)
 					& 0xffffff, &slave);
 				if (ret && ret != -ENOENT) {
-					mlx4_dbg(dev, "QP event %02x(%02x) on "
-						 "EQ %d at index %u: could "
-						 "not get slave id (%d)\n",
+					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
 						 eqe->type, eqe->subtype,
 						 eq->eqn, eq->cons_index, ret);
 					break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 						     & 0xffffff,
 						     &slave);
 			if (ret && ret != -ENOENT) {
-				mlx4_warn(dev, "SRQ event %02x(%02x) "
-					  "on EQ %d at index %u: could"
-					  " not get slave id (%d)\n",
+				mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
 					  eqe->type, eqe->subtype,
 					  eq->eqn, eq->cons_index, ret);
 				break;
 			}
-			mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
-				  " event: %02x(%02x)\n", __func__,
-				  slave,
+			mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+				  __func__, slave,
 				  be32_to_cpu(eqe->event.srq.srqn),
 				  eqe->type, eqe->subtype);
 
 			if (!ret && slave != dev->caps.function) {
-				mlx4_warn(dev, "%s: sending event "
-					  "%02x(%02x) to slave:%d\n",
-					  __func__, eqe->type,
+				mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+					  __func__, eqe->type,
 					  eqe->subtype, slave);
 				mlx4_slave_event(dev, slave, eqe);
 				break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
 			if (i == mlx4_master_func_num(dev))
 				continue;
-			mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
-				 " to slave: %d, port:%d\n",
+			mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
 				 __func__, i, port);
 			s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
 			if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 				be32_to_cpu(eqe->event.cq_err.cqn)
 				& 0xffffff, &slave);
 			if (ret && ret != -ENOENT) {
-				mlx4_dbg(dev, "CQ event %02x(%02x) on "
-					 "EQ %d at index %u: could "
-					 "not get slave id (%d)\n",
+				mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
 					 eqe->type, eqe->subtype,
 					 eq->eqn, eq->cons_index, ret);
 				break;
 			}
 
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
 		case MLX4_EVENT_TYPE_COMM_CHANNEL:
 			if (!mlx4_is_master(dev)) {
-				mlx4_warn(dev, "Received comm channel event "
-					  "for non master device\n");
+				mlx4_warn(dev, "Received comm channel event for non master device\n");
 				break;
 			}
 			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		case MLX4_EVENT_TYPE_FLR_EVENT:
 			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
 			if (!mlx4_is_master(dev)) {
-				mlx4_warn(dev, "Non-master function received"
-					  "FLR event\n");
+				mlx4_warn(dev, "Non-master function received FLR event\n");
 				break;
 			}
 
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
 				if (mlx4_is_master(dev))
 					for (i = 0; i < dev->num_slaves; i++) {
-						mlx4_dbg(dev, "%s: Sending "
-							 "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
-							 " to slave: %d\n", __func__, i);
+						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
+							 __func__, i);
 						if (i == dev->caps.function)
 							continue;
 						mlx4_slave_event(dev, i, eqe);
 					}
-				mlx4_err(dev, "Temperature Threshold was reached! "
-					 "Threshold: %d celsius degrees; "
-					 "Current Temperature: %d\n",
-					 be16_to_cpu(eqe->event.warming.warning_threshold),
-					 be16_to_cpu(eqe->event.warming.current_temperature));
+				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
+					 be16_to_cpu(eqe->event.warming.warning_threshold),
+					 be16_to_cpu(eqe->event.warming.current_temperature));
 			} else
-				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
-					  "subtype %02x on EQ %d at index %u. owner=%x, "
-					  "nent=0x%x, slave=%x, ownership=%s\n",
+				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
 					  eqe->type, eqe->subtype, eq->eqn,
 					  eq->cons_index, eqe->owner, eq->nent,
 					  eqe->slave_id,
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
 		case MLX4_EVENT_TYPE_ECC_DETECT:
 		default:
-			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
-				  "index %u. owner=%x, nent=0x%x, slave=%x, "
-				  "ownership=%s\n",
+			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
 				  eqe->type, eqe->subtype, eq->eqn,
 				  eq->cons_index, eqe->owner, eq->nent,
 				  eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
 	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
 				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
 	if (!priv->clr_base) {
-		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
 		return -ENOMEM;
 	}
 
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
+static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
+				     const cpumask_t *mask)
+{
+	struct mlx4_irq_notify *n = container_of(notify,
+						 struct mlx4_irq_notify,
+						 notify);
+	struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
+		struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
+
+		if (cq->irq == notify->irq)
+			cq->irq_affinity_change = true;
+	}
+}
+
+static void mlx4_release_irq_notifier(struct kref *ref)
+{
+	struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
+						 notify.kref);
+	kfree(n);
+}
+
+static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
+				     struct mlx4_dev *dev, int irq)
+{
+	struct mlx4_irq_notify *irq_notifier = NULL;
+	int err = 0;
+
+	irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
+	if (!irq_notifier) {
+		mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
+			  irq);
+		return;
+	}
+
+	irq_notifier->notify.irq = irq;
+	irq_notifier->notify.notify = mlx4_irq_notifier_notify;
+	irq_notifier->notify.release = mlx4_release_irq_notifier;
+	irq_notifier->arg = priv;
+	err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
+	if (err) {
+		kfree(irq_notifier);
+		irq_notifier = NULL;
+		mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
+	}
+}
+
+
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 					continue;
 				/*we dont want to break here*/
 			}
+			mlx4_assign_irq_notifier(priv, dev,
+						 priv->eq_table.eq[vec].irq);
+
 			eq_set_ci(&priv->eq_table.eq[vec], 1);
 		}
 	}
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 		  Belonging to a legacy EQ*/
 		mutex_lock(&priv->msix_ctl.pool_lock);
 		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			irq_set_affinity_notifier(
+				priv->eq_table.eq[vec].irq,
+				NULL);
 			free_irq(priv->eq_table.eq[vec].irq,
 				 &priv->eq_table.eq[vec]);
 			priv->msix_ctl.pool_bm &= ~(1ULL << i);
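For reference, the eq.c additions hook into the generic IRQ layer: irq_set_affinity_notifier() registers a callback that the kernel invokes (from a workqueue) after an IRQ's affinity mask changes, and the kref-based release hook frees the notifier. A minimal standalone sketch of the same mechanism (the my_* names are illustrative; only irq_set_affinity_notifier() and struct irq_affinity_notify are real kernel APIs):

	#include <linux/interrupt.h>
	#include <linux/slab.h>

	struct my_irq_notify {
		void *arg;			/* driver private data */
		struct irq_affinity_notify notify;
	};

	static void my_notify(struct irq_affinity_notify *notify,
			      const cpumask_t *mask)
	{
		/* runs after the affinity change; set a flag the fast
		 * path can test, as mlx4 marks its matching CQs above */
	}

	static void my_release(struct kref *ref)
	{
		kfree(container_of(ref, struct my_irq_notify, notify.kref));
	}

	static int my_register(unsigned int irq, void *arg)
	{
		struct my_irq_notify *n = kzalloc(sizeof(*n), GFP_KERNEL);
		int err;

		if (!n)
			return -ENOMEM;
		n->arg = arg;
		n->notify.irq = irq;
		n->notify.notify = my_notify;
		n->notify.release = my_release;
		err = irq_set_affinity_notifier(irq, &n->notify);
		if (err)
			kfree(n);	/* never registered, safe to free */
		return err;
	}

	/* unregistering, as mlx4_release_eq() does above, drops the kref
	 * and ends up in the release hook:
	 *	irq_set_affinity_notifier(irq, NULL);
	 */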
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 01e6dd61ee3c..688e1eabab29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -437,8 +437,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
 	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
 		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
 		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
-			mlx4_err(dev, "phy_wqe_gid is "
-				 "enforced on this ib port\n");
+			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
 			err = -EPROTONOSUPPORT;
 			goto out;
 		}
@@ -1070,10 +1069,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 		 */
 		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
 		if (lg < MLX4_ICM_PAGE_SHIFT) {
-			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
-				   MLX4_ICM_PAGE_SIZE,
-				   (unsigned long long) mlx4_icm_addr(&iter),
-				   mlx4_icm_size(&iter));
+			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
+				  MLX4_ICM_PAGE_SIZE,
+				  (unsigned long long) mlx4_icm_addr(&iter),
+				  mlx4_icm_size(&iter));
 			err = -EINVAL;
 			goto out;
 		}
@@ -1109,14 +1108,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 
 	switch (op) {
 	case MLX4_CMD_MAP_FA:
-		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
 		break;
 	case MLX4_CMD_MAP_ICM_AUX:
-		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
 		break;
 	case MLX4_CMD_MAP_ICM:
-		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
+		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
 			 tc, ts, (unsigned long long) virt - (ts << 10));
 		break;
 	}
 
@@ -1202,14 +1201,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
 	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
 	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
-		mlx4_err(dev, "Installed FW has unsupported "
-			 "command interface revision %d.\n",
+		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
 			 cmd_if_rev);
 		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
 			 (int) (dev->caps.fw_ver >> 32),
 			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
 			 (int) dev->caps.fw_ver & 0xffff);
-		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
 			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
 		err = -ENODEV;
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 26169b3eaed8..5f42f6d6e4c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
 MODULE_PARM_DESC(enable_64b_cqe_eqe,
 		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
 
-#define HCA_GLOBAL_CAP_MASK		0
-
 #define PF_CONTEXT_BEHAVIOUR_MASK	MLX4_FUNC_CAP_64B_EQE_CQE
 
 static char mlx4_version[] =
@@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
 
 static bool use_prio;
 module_param_named(use_prio, use_prio, bool, 0444);
-MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
-		 "(0/1, default 0)");
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
 
 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
@@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
 	for (i = 0; i < dev->caps.num_ports - 1; i++) {
 		if (port_type[i] != port_type[i + 1]) {
 			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
-				mlx4_err(dev, "Only same port types supported "
-					 "on this HCA, aborting.\n");
+				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
 				return -EINVAL;
 			}
 		}
@@ -172,8 +168,8 @@
 
 	for (i = 0; i < dev->caps.num_ports; i++) {
 		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
-			mlx4_err(dev, "Requested port type for port %d is not "
-				 "supported on this HCA\n", i + 1);
+			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
 			return -EINVAL;
 		}
 	}
@@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
 	if (err) {
-		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
 		return err;
 	}
 
 	if (dev_cap->min_page_sz > PAGE_SIZE) {
-		mlx4_err(dev, "HCA minimum page size of %d bigger than "
-			 "kernel PAGE_SIZE of %ld, aborting.\n",
+		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
 			 dev_cap->min_page_sz, PAGE_SIZE);
 		return -ENODEV;
 	}
 	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
-		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
-			 "aborting.\n",
+		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
 			 dev_cap->num_ports, MLX4_MAX_PORTS);
 		return -ENODEV;
 	}
 
 	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
-		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
-			 "PCI resource 2 size of 0x%llx, aborting.\n",
+		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
 			 dev_cap->uar_size,
 			 (unsigned long long) pci_resource_len(dev->pdev, 2));
 		return -ENODEV;
@@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	dev->caps.log_num_macs  = log_num_mac;
 	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
-	dev->caps.log_num_prios = use_prio ? 3 : 0;
 
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
 			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
-			mlx4_warn(dev, "Requested number of MACs is too much "
-				  "for port %d, reducing to %d.\n",
+			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
 				  i, 1 << dev->caps.log_num_macs);
 		}
 		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
 			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
-			mlx4_warn(dev, "Requested number of VLANs is too much "
-				  "for port %d, reducing to %d.\n",
+			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
 				  i, 1 << dev->caps.log_num_vlans);
 		}
 	}
@@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
 		(1 << dev->caps.log_num_macs) *
 		(1 << dev->caps.log_num_vlans) *
-		(1 << dev->caps.log_num_prios) *
 		dev->caps.num_ports;
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
 
@@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	memset(&hca_param, 0, sizeof(hca_param));
 	err = mlx4_QUERY_HCA(dev, &hca_param);
 	if (err) {
-		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
 		return err;
 	}
 
-	/*fail if the hca has an unknown capability */
-	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
-	    HCA_GLOBAL_CAP_MASK) {
+	/* fail if the hca has an unknown global capability
+	 * at this time global_caps should be always zeroed
+	 */
+	if (hca_param.global_caps) {
 		mlx4_err(dev, "Unknown hca global capabilities\n");
 		return -ENOSYS;
 	}
@@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
 	err = mlx4_dev_cap(dev, &dev_cap);
 	if (err) {
-		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
 		return err;
 	}
 
 	err = mlx4_QUERY_FW(dev);
 	if (err)
-		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
 
 	page_size = ~dev->caps.page_size_cap + 1;
 	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
 	if (page_size > PAGE_SIZE) {
-		mlx4_err(dev, "HCA minimum page size of %d bigger than "
-			 "kernel PAGE_SIZE of %ld, aborting.\n",
+		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
 			 page_size, PAGE_SIZE);
 		return -ENODEV;
 	}
@@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	memset(&func_cap, 0, sizeof(func_cap));
 	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
 	if (err) {
-		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
-			 err);
+		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
+			 err);
 		return err;
 	}
 
@@ -661,8 +650,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	dev->caps.num_amgms = 0;
 
 	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
-		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
-			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
+			 dev->caps.num_ports, MLX4_MAX_PORTS);
 		return -ENODEV;
 	}
 
@@ -682,8 +671,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
 		if (err) {
-			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
-				 " port %d, aborting (%d).\n", i, err);
+			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
+				 i, err);
 			goto err_mem;
 		}
 		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
@@ -702,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
 				       dev->caps.reserved_uars) >
 				       pci_resource_len(dev->pdev, 2)) {
-		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
-			 "PCI resource 2 size of 0x%llx, aborting.\n",
+		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
 			 dev->caps.uar_page_size * dev->caps.num_uars,
 			 (unsigned long long) pci_resource_len(dev->pdev, 2));
 		goto err_mem;
@@ -725,7 +713,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	}
 
 	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
-	mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
 
 	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
 
@@ -791,8 +779,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
 			dev->caps.port_type[port] = port_types[port - 1];
 			err = mlx4_SET_PORT(dev, port, -1);
 			if (err) {
-				mlx4_err(dev, "Failed to set port %d, "
-					 "aborting\n", port);
+				mlx4_err(dev, "Failed to set port %d, aborting\n",
+					 port);
 				goto out;
 			}
 		}
@@ -875,9 +863,7 @@ static ssize_t set_port_type(struct device *dev,
 		}
 	}
 	if (err) {
-		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
-			 "Set only 'eth' or 'ib' for both ports "
-			 "(should be the same)\n");
+		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
 		goto out;
 	}
 
@@ -982,8 +968,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
 		mlx4_CLOSE_PORT(mdev, port);
 		err = mlx4_SET_PORT(mdev, port, -1);
 		if (err) {
-			mlx4_err(mdev, "Failed to set port %d, "
-				 "aborting\n", port);
+			mlx4_err(mdev, "Failed to set port %d, aborting\n",
+				 port);
 			goto err_set_port;
 		}
 	}
@@ -1002,19 +988,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
 	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
 					 GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!priv->fw.fw_icm) {
-		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
 		return -ENOMEM;
 	}
 
 	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
 	if (err) {
-		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+		mlx4_err(dev, "MAP_FA command failed, aborting\n");
 		goto err_free;
 	}
 
 	err = mlx4_RUN_FW(dev);
 	if (err) {
-		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+		mlx4_err(dev, "RUN_FW command failed, aborting\n");
 		goto err_unmap_fa;
 	}
 
@@ -1098,30 +1084,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 
 	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
 	if (err) {
-		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
 		return err;
 	}
 
-	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
 		 (unsigned long long) icm_size >> 10,
 		 (unsigned long long) aux_pages << 2);
 
 	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
 					  GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!priv->fw.aux_icm) {
-		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
 		return -ENOMEM;
 	}
 
 	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
 	if (err) {
-		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
 		goto err_free_aux;
 	}
 
 	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
 	if (err) {
-		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
 		goto err_unmap_aux;
 	}
 
@@ -1132,7 +1118,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
 				  num_eqs, num_eqs, 0, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
 		goto err_unmap_cmpt;
 	}
 
@@ -1153,7 +1139,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				     dev->caps.num_mtts,
 				     dev->caps.reserved_mtts, 1, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
 		goto err_unmap_eq;
 	}
 
@@ -1163,7 +1149,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				     dev->caps.num_mpts,
 				     dev->caps.reserved_mrws, 1, 1);
 	if (err) {
-		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
 		goto err_unmap_mtt;
 	}
 
@@ -1174,7 +1160,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
 				  0, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
 		goto err_unmap_dmpt;
 	}
 
@@ -1185,7 +1171,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
 				  0, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
 		goto err_unmap_qp;
 	}
 
@@ -1196,7 +1182,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
 				  0, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
 		goto err_unmap_auxc;
 	}
 
@@ -1217,7 +1203,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  dev->caps.num_cqs,
 				  dev->caps.reserved_cqs, 0, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
 		goto err_unmap_rdmarc;
 	}
 
@@ -1227,7 +1213,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  dev->caps.num_srqs,
 				  dev->caps.reserved_srqs, 0, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
 		goto err_unmap_cq;
 	}
 
@@ -1245,7 +1231,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 			  dev->caps.num_mgms + dev->caps.num_amgms,
 			  0, 0);
 	if (err) {
-		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
 		goto err_unmap_srq;
 	}
 
@@ -1322,7 +1308,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
 
 	mutex_lock(&priv->cmd.slave_cmd_mutex);
 	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
-		mlx4_warn(dev, "Failed to close slave function.\n");
+		mlx4_warn(dev, "Failed to close slave function\n");
 	mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
@@ -1420,7 +1406,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 	u32 cmd_channel_ver;
 
 	if (atomic_read(&pf_loading)) {
-		mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
 		return -EPROBE_DEFER;
 	}
 
@@ -1433,8 +1419,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 	 * NUM_OF_RESET_RETRIES times before leaving.*/
 	if (ret_from_reset) {
 		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
-			mlx4_warn(dev, "slave is currently in the "
-				  "middle of FLR. Deferring probe.\n");
+			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
 			return -EPROBE_DEFER;
 		} else
@@ -1448,8 +1433,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 
 	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
 	    MLX4_COMM_GET_IF_REV(slave_read)) {
-		mlx4_err(dev, "slave driver version is not supported"
-			 " by the master\n");
+		mlx4_err(dev, "slave driver version is not supported by the master\n");
 		goto err;
 	}
 
@@ -1527,8 +1511,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
 
 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
-			mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
-				  "set to use B0 steering. Falling back to A0 steering mode.\n");
+			mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
 	}
 	dev->oper_log_mgm_entry_size =
 		mlx4_log_num_mgm_entry_size > 0 ?
@@ -1536,8 +1519,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
 			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
 		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
 	}
-	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
-		 "modparam log_num_mgm_entry_size = %d\n",
+	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
 		 mlx4_steering_mode_str(dev->caps.steering_mode),
 		 dev->oper_log_mgm_entry_size,
 		 mlx4_log_num_mgm_entry_size);
@@ -1571,15 +1553,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 		err = mlx4_QUERY_FW(dev);
 		if (err) {
 			if (err == -EACCES)
-				mlx4_info(dev, "non-primary physical function, skipping.\n");
+				mlx4_info(dev, "non-primary physical function, skipping\n");
 			else
-				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
 			return err;
 		}
 
 		err = mlx4_load_fw(dev);
 		if (err) {
-			mlx4_err(dev, "Failed to start FW, aborting.\n");
+			mlx4_err(dev, "Failed to start FW, aborting\n");
 			return err;
 		}
 
@@ -1591,7 +1573,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
 		err = mlx4_dev_cap(dev, &dev_cap);
 		if (err) {
-			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
 			goto err_stop_fw;
 		}
 
@@ -1632,7 +1614,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
 		err = mlx4_INIT_HCA(dev, &init_hca);
 		if (err) {
-			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
 			goto err_free_icm;
 		}
 		/*
@@ -1643,7 +1625,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 			memset(&init_hca, 0, sizeof(init_hca));
 			err = mlx4_QUERY_HCA(dev, &init_hca);
 			if (err) {
-				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
+				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
 			} else {
 				dev->caps.hca_core_clock =
@@ -1656,14 +1638,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 			if (!dev->caps.hca_core_clock) {
 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
 				mlx4_err(dev,
-					 "HCA frequency is 0. Timestamping is not supported.");
+					 "HCA frequency is 0 - timestamping is not supported\n");
 			} else if (map_internal_clock(dev)) {
 				/*
 				 * Map internal clock,
 				 * in case of failure disable timestamping
 				 */
 				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
-				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
 			}
 		}
 	} else {
@@ -1690,7 +1672,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
 	err = mlx4_QUERY_ADAPTER(dev, &adapter);
 	if (err) {
-		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
 		goto unmap_bf;
 	}
 
@@ -1808,79 +1790,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 
 	err = mlx4_init_uar_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "user access region table, aborting.\n");
-		return err;
+		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
+		return err;
 	}
 
 	err = mlx4_uar_alloc(dev, &priv->driver_uar);
 	if (err) {
-		mlx4_err(dev, "Failed to allocate driver access region, "
-			 "aborting.\n");
+		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
 		goto err_uar_table_free;
 	}
 
 	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!priv->kar) {
-		mlx4_err(dev, "Couldn't map kernel access region, "
-			 "aborting.\n");
+		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
 		err = -ENOMEM;
 		goto err_uar_free;
 	}
 
 	err = mlx4_init_pd_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "protection domain table, aborting.\n");
+		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
 		goto err_kar_unmap;
 	}
 
 	err = mlx4_init_xrcd_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "reliable connection domain table, aborting.\n");
+		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
 		goto err_pd_table_free;
 	}
 
 	err = mlx4_init_mr_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "memory region table, aborting.\n");
+		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
 		goto err_xrcd_table_free;
 	}
 
 	if (!mlx4_is_slave(dev)) {
 		err = mlx4_init_mcg_table(dev);
 		if (err) {
-			mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
 			goto err_mr_table_free;
 		}
 	}
 
 	err = mlx4_init_eq_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "event queue table, aborting.\n");
		goto err_mcg_table_free;
 	}
 
 	err = mlx4_cmd_use_events(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to switch to event-driven "
-			 "firmware commands, aborting.\n");
+		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
 		goto err_eq_table_free;
 	}
 
 	err = mlx4_NOP(dev);
 	if (err) {
 		if (dev->flags & MLX4_FLAG_MSI_X) {
-			mlx4_warn(dev, "NOP command failed to generate MSI-X "
-				  "interrupt IRQ %d).\n",
+			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
 				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
-			mlx4_warn(dev, "Trying again without MSI-X.\n");
+			mlx4_warn(dev, "Trying again without MSI-X\n");
 		} else {
-			mlx4_err(dev, "NOP command failed to generate interrupt "
-				 "(IRQ %d), aborting.\n",
+			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
 				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
 			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
 		}
@@ -1892,28 +1864,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 
 	err = mlx4_init_cq_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "completion queue table, aborting.\n");
+		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
 		goto err_cmd_poll;
 	}
 
 	err = mlx4_init_srq_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "shared receive queue table, aborting.\n");
+		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
 		goto err_cq_table_free;
 	}
 
 	err = mlx4_init_qp_table(dev);
 	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "queue pair table, aborting.\n");
+		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
 		goto err_srq_table_free;
 	}
 
 	err = mlx4_init_counters_table(dev);
 	if (err && err != -ENOENT) {
-		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
 		goto err_qp_table_free;
 	}
 
@@ -1923,9 +1892,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 			err = mlx4_get_port_ib_caps(dev, port,
 						    &ib_port_default_caps);
 			if (err)
-				mlx4_warn(dev, "failed to get port %d default "
-					  "ib capabilities (%d). Continuing "
-					  "with caps = 0\n", port, err);
+				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
+					  port, err);
 			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
 
 			/* initialize per-slave default ib port capabilities */
@@ -1935,7 +1903,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 					if (i == mlx4_master_func_num(dev))
 						continue;
 					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
-							ib_port_default_caps;
+						ib_port_default_caps;
 				}
 			}
 
@@ -1948,7 +1916,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 					  dev->caps.pkey_table_len[port] : -1);
 		if (err) {
 			mlx4_err(dev, "Failed to set port %d, aborting\n",
-				port);
+				 port);
 			goto err_counters_table_free;
 		}
 	}
@@ -2024,7 +1992,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 			kfree(entries);
 			goto no_msi;
 		} else if (nreq < MSIX_LEGACY_SZ +
-				  dev->caps.num_ports * MIN_MSIX_P_PORT) {
+			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
 			/*Working in legacy mode , all EQ's shared*/
 			dev->caps.comp_pool = 0;
 			dev->caps.num_comp_vectors = nreq - 1;
@@ -2225,8 +2193,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device, "
-			"aborting.\n");
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		return err;
 	}
 
@@ -2273,14 +2240,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	 */
 	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
 	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		dev_err(&pdev->dev, "Missing DCS, aborting."
-			"(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
 			pci_dev_data, pci_resource_flags(pdev, 0));
 		err = -ENODEV;
 		goto err_disable_pdev;
 	}
 	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+		dev_err(&pdev->dev, "Missing UAR, aborting\n");
 		err = -ENODEV;
 		goto err_disable_pdev;
 	}
@@ -2295,21 +2261,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
-		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
-			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
 			goto err_release_regions;
 		}
 	}
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
-		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
-			 "consistent PCI DMA mask.\n");
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
-			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
-				"aborting.\n");
+			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
 			goto err_release_regions;
 		}
 	}
@@ -2340,7 +2304,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	if (total_vfs) {
 		unsigned vfs_offset = 0;
 		for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
-				vfs_offset + nvfs[i] < extended_func_num(pdev);
+		     vfs_offset + nvfs[i] < extended_func_num(pdev);
 		     vfs_offset += nvfs[i], i++)
 			;
 		if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2366,8 +2330,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	if (err < 0)
2367 goto err_free_dev; 2331 goto err_free_dev;
2368 else { 2332 else {
2369 mlx4_warn(dev, "Multiple PFs not yet supported." 2333 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
2370 " Skipping PF.\n");
2371 err = -EINVAL; 2334 err = -EINVAL;
2372 goto err_free_dev; 2335 goto err_free_dev;
2373 } 2336 }
@@ -2377,8 +2340,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2377 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", 2340 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2378 total_vfs); 2341 total_vfs);
2379 dev->dev_vfs = kzalloc( 2342 dev->dev_vfs = kzalloc(
2380 total_vfs * sizeof(*dev->dev_vfs), 2343 total_vfs * sizeof(*dev->dev_vfs),
2381 GFP_KERNEL); 2344 GFP_KERNEL);
2382 if (NULL == dev->dev_vfs) { 2345 if (NULL == dev->dev_vfs) {
2383 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2346 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2384 err = 0; 2347 err = 0;
@@ -2386,14 +2349,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2386 atomic_inc(&pf_loading); 2349 atomic_inc(&pf_loading);
2387 err = pci_enable_sriov(pdev, total_vfs); 2350 err = pci_enable_sriov(pdev, total_vfs);
2388 if (err) { 2351 if (err) {
2389 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2352 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
2390 err); 2353 err);
2391 atomic_dec(&pf_loading); 2354 atomic_dec(&pf_loading);
2392 err = 0; 2355 err = 0;
2393 } else { 2356 } else {
2394 mlx4_warn(dev, "Running in master mode\n"); 2357 mlx4_warn(dev, "Running in master mode\n");
2395 dev->flags |= MLX4_FLAG_SRIOV | 2358 dev->flags |= MLX4_FLAG_SRIOV |
2396 MLX4_FLAG_MASTER; 2359 MLX4_FLAG_MASTER;
2397 dev->num_vfs = total_vfs; 2360 dev->num_vfs = total_vfs;
2398 sriov_initialized = 1; 2361 sriov_initialized = 1;
2399 } 2362 }
@@ -2410,7 +2373,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2410 */ 2373 */
2411 err = mlx4_reset(dev); 2374 err = mlx4_reset(dev);
2412 if (err) { 2375 if (err) {
2413 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2376 mlx4_err(dev, "Failed to reset HCA, aborting\n");
2414 goto err_rel_own; 2377 goto err_rel_own;
2415 } 2378 }
2416 } 2379 }
@@ -2418,7 +2381,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2418slave_start: 2381slave_start:
2419 err = mlx4_cmd_init(dev); 2382 err = mlx4_cmd_init(dev);
2420 if (err) { 2383 if (err) {
2421 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 2384 mlx4_err(dev, "Failed to init command interface, aborting\n");
2422 goto err_sriov; 2385 goto err_sriov;
2423 } 2386 }
2424 2387
@@ -2432,8 +2395,7 @@ slave_start:
2432 dev->num_slaves = 0; 2395 dev->num_slaves = 0;
2433 err = mlx4_multi_func_init(dev); 2396 err = mlx4_multi_func_init(dev);
2434 if (err) { 2397 if (err) {
2435 mlx4_err(dev, "Failed to init slave mfunc" 2398 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
2436 " interface, aborting.\n");
2437 goto err_cmd; 2399 goto err_cmd;
2438 } 2400 }
2439 } 2401 }
@@ -2465,8 +2427,7 @@ slave_start:
2465 unsigned sum = 0; 2427 unsigned sum = 0;
2466 err = mlx4_multi_func_init(dev); 2428 err = mlx4_multi_func_init(dev);
2467 if (err) { 2429 if (err) {
2468 mlx4_err(dev, "Failed to init master mfunc" 2430 mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
2469 "interface, aborting.\n");
2470 goto err_close; 2431 goto err_close;
2471 } 2432 }
2472 if (sriov_initialized) { 2433 if (sriov_initialized) {
@@ -2477,10 +2438,7 @@ slave_start:
2477 if (ib_ports && 2438 if (ib_ports &&
2478 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2439 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2479 mlx4_err(dev, 2440 mlx4_err(dev,
2480 "Invalid syntax of num_vfs/probe_vfs " 2441 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
2481 "with IB port. Single port VFs syntax"
2482 " is only supported when all ports "
2483 "are configured as ethernet\n");
2484 goto err_close; 2442 goto err_close;
2485 } 2443 }
2486 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { 2444 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2506,8 +2464,7 @@ slave_start:
2506 if ((mlx4_is_mfunc(dev)) && 2464 if ((mlx4_is_mfunc(dev)) &&
2507 !(dev->flags & MLX4_FLAG_MSI_X)) { 2465 !(dev->flags & MLX4_FLAG_MSI_X)) {
2508 err = -ENOSYS; 2466 err = -ENOSYS;
2509 mlx4_err(dev, "INTx is not supported in multi-function mode." 2467 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
2510 " aborting.\n");
2511 goto err_free_eq; 2468 goto err_free_eq;
2512 } 2469 }
2513 2470
@@ -2660,7 +2617,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
2660 /* in SRIOV it is not allowed to unload the pf's 2617 /* in SRIOV it is not allowed to unload the pf's
2661 * driver while there are alive vf's */ 2618 * driver while there are alive vf's */
2662 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) 2619 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
2663 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); 2620 pr_warn("Removing PF when there are assigned VF's !!!\n");
2664 mlx4_stop_sense(dev); 2621 mlx4_stop_sense(dev);
2665 mlx4_unregister_device(dev); 2622 mlx4_unregister_device(dev);
2666 2623
@@ -2824,7 +2781,7 @@ static struct pci_driver mlx4_driver = {
2824 .name = DRV_NAME, 2781 .name = DRV_NAME,
2825 .id_table = mlx4_pci_table, 2782 .id_table = mlx4_pci_table,
2826 .probe = mlx4_init_one, 2783 .probe = mlx4_init_one,
2827 .shutdown = mlx4_remove_one, 2784 .shutdown = __mlx4_remove_one,
2828 .remove = mlx4_remove_one, 2785 .remove = mlx4_remove_one,
2829 .err_handler = &mlx4_err_handler, 2786 .err_handler = &mlx4_err_handler,
2830}; 2787};
@@ -2832,33 +2789,36 @@ static struct pci_driver mlx4_driver = {
2832static int __init mlx4_verify_params(void) 2789static int __init mlx4_verify_params(void)
2833{ 2790{
2834 if ((log_num_mac < 0) || (log_num_mac > 7)) { 2791 if ((log_num_mac < 0) || (log_num_mac > 7)) {
2835 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); 2792 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
2836 return -1; 2793 return -1;
2837 } 2794 }
2838 2795
2839 if (log_num_vlan != 0) 2796 if (log_num_vlan != 0)
2840 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 2797 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
2841 MLX4_LOG_NUM_VLANS); 2798 MLX4_LOG_NUM_VLANS);
2799
2800 if (use_prio != 0)
2801 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
2842 2802
2843 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 2803 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
2844 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 2804 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
2805 log_mtts_per_seg);
2845 return -1; 2806 return -1;
2846 } 2807 }
2847 2808
2848 /* Check if module param for ports type has legal combination */ 2809 /* Check if module param for ports type has legal combination */
2849 if (port_type_array[0] == false && port_type_array[1] == true) { 2810 if (port_type_array[0] == false && port_type_array[1] == true) {
2850 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 2811 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
2851 port_type_array[0] = true; 2812 port_type_array[0] = true;
2852 } 2813 }
2853 2814
2854 if (mlx4_log_num_mgm_entry_size != -1 && 2815 if (mlx4_log_num_mgm_entry_size != -1 &&
2855 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 2816 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
2856 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { 2817 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
2857 pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " 2818 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
2858 "in legal range (-1 or %d..%d)\n", 2819 mlx4_log_num_mgm_entry_size,
2859 mlx4_log_num_mgm_entry_size, 2820 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
2860 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 2821 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2861 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2862 return -1; 2822 return -1;
2863 } 2823 }
2864 2824
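
The mlx4 changes above are mostly mechanical log-message cleanups: user-visible format strings that had been split across several quoted fragments are coalesced onto one source line (so the full message can be grepped), trailing periods are dropped, and raw printk()/pr_warning() calls become pr_warn() and friends. A minimal userspace sketch of the coalescing pattern follows; pr_warn() here is a stand-in for the kernel macro, not the driver's definition.

#include <stdio.h>

/* Stand-in for the kernel's pr_warn(); ##__VA_ARGS__ is a GNU/clang
 * extension that swallows the comma when no arguments are passed. */
#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

static void report(int port, int err)
{
	/* Before: a split literal, hard to grep for the full message:
	 *   pr_warn("failed to get port %d default "
	 *           "ib capabilities (%d)\n", port, err);
	 * After: one literal; only the argument list wraps. */
	pr_warn("failed to get port %d default ib capabilities (%d)\n",
		port, err);
}

int main(void)
{
	report(1, -5);
	return 0;
}
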
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4edf825..4c36def8e10f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
638 638
639 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 639 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
640 if (*index != hash) { 640 if (*index != hash) {
641 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 641 mlx4_err(dev, "Found zero MGID in AMGM\n");
642 err = -EINVAL; 642 err = -EINVAL;
643 } 643 }
644 return err; 644 return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
874 mlx4_err(dev, "%s", buf); 874 mlx4_err(dev, "%s", buf);
875 875
876 if (len >= BUF_SIZE) 876 if (len >= BUF_SIZE)
877 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 877 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
878} 878}
879 879
880int mlx4_flow_attach(struct mlx4_dev *dev, 880int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
897 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 897 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
898 if (ret < 0) { 898 if (ret < 0) {
899 mlx4_free_cmd_mailbox(dev, mailbox); 899 mlx4_free_cmd_mailbox(dev, mailbox);
900 return -EINVAL; 900 return ret;
901 } 901 }
902 size += ret; 902 size += ret;
903 } 903 }
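
One behavioral fix above: mlx4_flow_attach() now returns the error code from parse_trans_rule() instead of flattening every failure to -EINVAL, so callers can distinguish, say, an allocation failure from a malformed rule. A hedged generic sketch of the pattern, with parse_rule() and attach_rule() as hypothetical stand-ins:

#include <errno.h>

static int parse_rule(int spec)
{
	return (spec < 0) ? -ENOMEM : 0;	/* a distinct, meaningful errno */
}

static int attach_rule(int spec)
{
	int ret = parse_rule(spec);

	if (ret < 0)
		return ret;	/* was effectively: return -EINVAL, losing -ENOMEM */
	return 0;
}

int main(void)
{
	return attach_rule(-1) == -ENOMEM ? 0 : 1;
}
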
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
905 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 905 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
906 if (ret == -ENOMEM) 906 if (ret == -ENOMEM)
907 mlx4_err_rule(dev, 907 mlx4_err_rule(dev,
908 "mcg table is full. Fail to register network rule.\n", 908 "mcg table is full. Fail to register network rule\n",
909 rule); 909 rule);
910 else if (ret) 910 else if (ret)
911 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 911 mlx4_err_rule(dev, "Fail to register network rule\n", rule);
912 912
913 mlx4_free_cmd_mailbox(dev, mailbox); 913 mlx4_free_cmd_mailbox(dev, mailbox);
914 914
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
994 994
995 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 995 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
996 if (members_count == dev->caps.num_qp_per_mgm) { 996 if (members_count == dev->caps.num_qp_per_mgm) {
997 mlx4_err(dev, "MGM at index %x is full.\n", index); 997 mlx4_err(dev, "MGM at index %x is full\n", index);
998 err = -ENOMEM; 998 err = -ENOMEM;
999 goto out; 999 goto out;
1000 } 1000 }
@@ -1042,7 +1042,7 @@ out:
1042 } 1042 }
1043 if (err && link && index != -1) { 1043 if (err && link && index != -1) {
1044 if (index < dev->caps.num_mgms) 1044 if (index < dev->caps.num_mgms)
1045 mlx4_warn(dev, "Got AMGM index %d < %d", 1045 mlx4_warn(dev, "Got AMGM index %d < %d\n",
1046 index, dev->caps.num_mgms); 1046 index, dev->caps.num_mgms);
1047 else 1047 else
1048 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1048 mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1133 1133
1134 if (amgm_index) { 1134 if (amgm_index) {
1135 if (amgm_index < dev->caps.num_mgms) 1135 if (amgm_index < dev->caps.num_mgms)
1136 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1136 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
1137 index, amgm_index, dev->caps.num_mgms); 1137 index, amgm_index, dev->caps.num_mgms);
1138 else 1138 else
1139 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1139 mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1153 goto out; 1153 goto out;
1154 1154
1155 if (index < dev->caps.num_mgms) 1155 if (index < dev->caps.num_mgms)
1156 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1156 mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
1157 prev, index, dev->caps.num_mgms); 1157 prev, index, dev->caps.num_mgms);
1158 else 1158 else
1159 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1159 mlx4_bitmap_free(&priv->mcg_table.bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7a0665beebb1..1d8af7336807 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -221,18 +221,19 @@ extern int mlx4_debug_level;
221#define mlx4_debug_level (0) 221#define mlx4_debug_level (0)
222#endif /* CONFIG_MLX4_DEBUG */ 222#endif /* CONFIG_MLX4_DEBUG */
223 223
224#define mlx4_dbg(mdev, format, arg...) \ 224#define mlx4_dbg(mdev, format, ...) \
225do { \ 225do { \
226 if (mlx4_debug_level) \ 226 if (mlx4_debug_level) \
227 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ 227 dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
228 ##__VA_ARGS__); \
228} while (0) 229} while (0)
229 230
230#define mlx4_err(mdev, format, arg...) \ 231#define mlx4_err(mdev, format, ...) \
231 dev_err(&mdev->pdev->dev, format, ##arg) 232 dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
232#define mlx4_info(mdev, format, arg...) \ 233#define mlx4_info(mdev, format, ...) \
233 dev_info(&mdev->pdev->dev, format, ##arg) 234 dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
234#define mlx4_warn(mdev, format, arg...) \ 235#define mlx4_warn(mdev, format, ...) \
235 dev_warn(&mdev->pdev->dev, format, ##arg) 236 dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
236 237
237extern int mlx4_log_num_mgm_entry_size; 238extern int mlx4_log_num_mgm_entry_size;
238extern int log_mtts_per_seg; 239extern int log_mtts_per_seg;
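
The mlx4.h hunk (like the mlx4_en.h and mlx5 header hunks below) converts the GNU-specific named variadic form arg.../##arg to the C99 form .../__VA_ARGS__, and parenthesizes the mdev parameter so the macro expands correctly when the caller passes an expression rather than a plain identifier. A standalone sketch, assuming a fprintf() back end and illustrative names (my_dev, log_warn):

#include <stdio.h>

struct my_dev { const char *name; };

/* C99-style variadic macro; (mdev) is parenthesized so something like
 * log_warn(p + 1, ...) would still expand as intended.  ##__VA_ARGS__
 * remains a GNU/clang extension. */
#define log_warn(mdev, format, ...) \
	fprintf(stderr, "%s: " format, (mdev)->name, ##__VA_ARGS__)

int main(void)
{
	struct my_dev d = { "mlx4_0" };
	struct my_dev *p = &d;

	log_warn(p, "no extra arguments\n");	/* comma swallowed by ## */
	log_warn(p, "port %d down\n", 2);	/* variadic case */
	return 0;
}
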
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe3e80..0e15295bedd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -313,6 +313,7 @@ struct mlx4_en_rx_ring {
313 unsigned long csum_ok; 313 unsigned long csum_ok;
314 unsigned long csum_none; 314 unsigned long csum_none;
315 int hwtstamp_rx_filter; 315 int hwtstamp_rx_filter;
316 cpumask_var_t affinity_mask;
316}; 317};
317 318
318struct mlx4_en_cq { 319struct mlx4_en_cq {
@@ -830,26 +831,26 @@ __printf(3, 4)
830int en_print(const char *level, const struct mlx4_en_priv *priv, 831int en_print(const char *level, const struct mlx4_en_priv *priv,
831 const char *format, ...); 832 const char *format, ...);
832 833
833#define en_dbg(mlevel, priv, format, arg...) \ 834#define en_dbg(mlevel, priv, format, ...) \
834do { \ 835do { \
835 if (NETIF_MSG_##mlevel & priv->msg_enable) \ 836 if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
836 en_print(KERN_DEBUG, priv, format, ##arg); \ 837 en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
837} while (0) 838} while (0)
838#define en_warn(priv, format, arg...) \ 839#define en_warn(priv, format, ...) \
839 en_print(KERN_WARNING, priv, format, ##arg) 840 en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
840#define en_err(priv, format, arg...) \ 841#define en_err(priv, format, ...) \
841 en_print(KERN_ERR, priv, format, ##arg) 842 en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
842#define en_info(priv, format, arg...) \ 843#define en_info(priv, format, ...) \
843 en_print(KERN_INFO, priv, format, ## arg) 844 en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
844 845
845#define mlx4_err(mdev, format, arg...) \ 846#define mlx4_err(mdev, format, ...) \
846 pr_err("%s %s: " format, DRV_NAME, \ 847 pr_err(DRV_NAME " %s: " format, \
847 dev_name(&mdev->pdev->dev), ##arg) 848 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
848#define mlx4_info(mdev, format, arg...) \ 849#define mlx4_info(mdev, format, ...) \
849 pr_info("%s %s: " format, DRV_NAME, \ 850 pr_info(DRV_NAME " %s: " format, \
850 dev_name(&mdev->pdev->dev), ##arg) 851 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
851#define mlx4_warn(mdev, format, arg...) \ 852#define mlx4_warn(mdev, format, ...) \
852 pr_warning("%s %s: " format, DRV_NAME, \ 853 pr_warn(DRV_NAME " %s: " format, \
853 dev_name(&mdev->pdev->dev), ##arg) 854 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
854 855
855#endif 856#endif
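
Besides the same variadic-macro conversion, the mlx4_en.h hunk stops passing DRV_NAME through a runtime "%s" and instead pastes it into the format string with compile-time literal concatenation, so the prefix is assembled by the preprocessor. Minimal sketch; the DRV_NAME value and device string here are assumed for illustration:

#include <stdio.h>

#define DRV_NAME "mlx4_en"

int main(void)
{
	/* Before: DRV_NAME substituted at run time. */
	printf("%s %s: %s", DRV_NAME, "0000:03:00.0", "link up\n");
	/* After: "mlx4_en %s: ..." is a single literal built at compile time. */
	printf(DRV_NAME " %s: %s", "0000:03:00.0", "link up\n");
	return 0;
}
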
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 4c71dafad217..2839abb878a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
250 MLX4_CMD_TIME_CLASS_A, 250 MLX4_CMD_TIME_CLASS_A,
251 MLX4_CMD_WRAPPED); 251 MLX4_CMD_WRAPPED);
252 if (err) 252 if (err)
253 mlx4_warn(dev, "Failed to free mtt range at:" 253 mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
254 "%d order:%d\n", offset, order); 254 offset, order);
255 return; 255 return;
256 } 256 }
257 __mlx4_free_mtt_range(dev, offset, order); 257 __mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
436 key_to_hw_index(mr->key) & 436 key_to_hw_index(mr->key) &
437 (dev->caps.num_mpts - 1)); 437 (dev->caps.num_mpts - 1));
438 if (err) { 438 if (err) {
439 mlx4_warn(dev, "HW2SW_MPT failed (%d),", err); 439 mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
440 mlx4_warn(dev, "MR has MWs bound to it.\n"); 440 err);
441 return err; 441 return err;
442 } 442 }
443 443
@@ -774,7 +774,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
774 mlx4_alloc_mtt_range(dev, 774 mlx4_alloc_mtt_range(dev,
775 fls(dev->caps.reserved_mtts - 1)); 775 fls(dev->caps.reserved_mtts - 1));
776 if (priv->reserved_mtts < 0) { 776 if (priv->reserved_mtts < 0) {
777 mlx4_warn(dev, "MTT table of order %u is too small.\n", 777 mlx4_warn(dev, "MTT table of order %u is too small\n",
778 mr_table->mtt_buddy.max_order); 778 mr_table->mtt_buddy.max_order);
779 err = -ENOMEM; 779 err = -ENOMEM;
780 goto err_reserve_mtts; 780 goto err_reserve_mtts;
@@ -955,8 +955,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
955 mailbox = mlx4_alloc_cmd_mailbox(dev); 955 mailbox = mlx4_alloc_cmd_mailbox(dev);
956 if (IS_ERR(mailbox)) { 956 if (IS_ERR(mailbox)) {
957 err = PTR_ERR(mailbox); 957 err = PTR_ERR(mailbox);
958 printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" 958 pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
959 " failed (%d)\n", err);
960 return; 959 return;
961 } 960 }
962 961
@@ -965,8 +964,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
965 (dev->caps.num_mpts - 1)); 964 (dev->caps.num_mpts - 1));
966 mlx4_free_cmd_mailbox(dev, mailbox); 965 mlx4_free_cmd_mailbox(dev, mailbox);
967 if (err) { 966 if (err) {
968 printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", 967 pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
969 err);
970 return; 968 return;
971 } 969 }
972 fmr->mr.enabled = MLX4_MPT_EN_SW; 970 fmr->mr.enabled = MLX4_MPT_EN_SW;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 5ec6f203c6e6..7ab97174886d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -254,8 +254,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
254 if (validate_index(dev, table, index)) 254 if (validate_index(dev, table, index))
255 goto out; 255 goto out;
256 if (--table->refs[index]) { 256 if (--table->refs[index]) {
257 mlx4_dbg(dev, "Have more references for index %d," 257 mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
258 "no need to modify mac table\n", index); 258 index);
259 goto out; 259 goto out;
260 } 260 }
261 261
@@ -453,9 +453,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
453 } 453 }
454 454
455 if (--table->refs[index]) { 455 if (--table->refs[index]) {
456 mlx4_dbg(dev, "Have %d more references for index %d," 456 mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
457 "no need to modify vlan table\n", table->refs[index], 457 table->refs[index], index);
458 index);
459 goto out; 458 goto out;
460 } 459 }
461 table->entries[index] = 0; 460 table->entries[index] = 0;
@@ -796,8 +795,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
796 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, 795 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
797 sizeof(gid_entry_tbl->raw))) { 796 sizeof(gid_entry_tbl->raw))) {
798 /* found duplicate */ 797 /* found duplicate */
799 mlx4_warn(dev, "requested gid entry for slave:%d " 798 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
800 "is a duplicate of gid at index %d\n",
801 slave, i); 799 slave, i);
802 mutex_unlock(&(priv->port[port].gid_table.mutex)); 800 mutex_unlock(&(priv->port[port].gid_table.mutex));
803 return -EINVAL; 801 return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc2a1ec..14089d9e1667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
164 } 164 }
165 165
166 if (total_size > dev_cap->max_icm_sz) { 166 if (total_size > dev_cap->max_icm_sz) {
167 mlx4_err(dev, "Profile requires 0x%llx bytes; " 167 mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
168 "won't fit in 0x%llx bytes of context memory.\n", 168 (unsigned long long) total_size,
169 (unsigned long long) total_size, 169 (unsigned long long) dev_cap->max_icm_sz);
170 (unsigned long long) dev_cap->max_icm_sz);
171 kfree(profile); 170 kfree(profile);
172 return -ENOMEM; 171 return -ENOMEM;
173 } 172 }
174 173
175 if (profile[i].size) 174 if (profile[i].size)
176 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, " 175 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
177 "size 0x%10llx\n", 176 i, res_name[profile[i].type],
178 i, res_name[profile[i].type], profile[i].log_num, 177 profile[i].log_num,
179 (unsigned long long) profile[i].start, 178 (unsigned long long) profile[i].start,
180 (unsigned long long) profile[i].size); 179 (unsigned long long) profile[i].size);
181 } 180 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 40af61947925..0dc31d85fc3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
264 MLX4_CMD_FREE_RES, 264 MLX4_CMD_FREE_RES,
265 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 265 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
266 if (err) { 266 if (err) {
267 mlx4_warn(dev, "Failed to release qp range" 267 mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
268 " base:%d cnt:%d\n", base_qpn, cnt); 268 base_qpn, cnt);
269 } 269 }
270 } else 270 } else
271 __mlx4_qp_release_range(dev, base_qpn, cnt); 271 __mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -612,8 +612,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
612 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], 612 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
613 context, 0, 0, qp); 613 context, 0, 0, qp);
614 if (err) { 614 if (err) {
615 mlx4_err(dev, "Failed to bring QP to state: " 615 mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
616 "%d with error: %d\n",
617 states[i + 1], err); 616 states[i + 1], err);
618 return err; 617 return err;
619 } 618 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b5093d8b1..ea1c6d092145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
72 hca_header = kmalloc(256, GFP_KERNEL); 72 hca_header = kmalloc(256, GFP_KERNEL);
73 if (!hca_header) { 73 if (!hca_header) {
74 err = -ENOMEM; 74 err = -ENOMEM;
75 mlx4_err(dev, "Couldn't allocate memory to save HCA " 75 mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
76 "PCI header, aborting.\n");
77 goto out; 76 goto out;
78 } 77 }
79 78
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
84 continue; 83 continue;
85 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { 84 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
86 err = -ENODEV; 85 err = -ENODEV;
87 mlx4_err(dev, "Couldn't save HCA " 86 mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
88 "PCI header, aborting.\n");
89 goto out; 87 goto out;
90 } 88 }
91 } 89 }
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
94 MLX4_RESET_SIZE); 92 MLX4_RESET_SIZE);
95 if (!reset) { 93 if (!reset) {
96 err = -ENOMEM; 94 err = -ENOMEM;
97 mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n"); 95 mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
98 goto out; 96 goto out;
99 } 97 }
100 98
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
133 131
134 if (vendor == 0xffff) { 132 if (vendor == 0xffff) {
135 err = -ENODEV; 133 err = -ENODEV;
136 mlx4_err(dev, "PCI device did not come back after reset, " 134 mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
137 "aborting.\n");
138 goto out; 135 goto out;
139 } 136 }
140 137
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
144 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, 141 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
145 devctl)) { 142 devctl)) {
146 err = -ENODEV; 143 err = -ENODEV;
147 mlx4_err(dev, "Couldn't restore HCA PCI Express " 144 mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
148 "Device Control register, aborting.\n");
149 goto out; 145 goto out;
150 } 146 }
151 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; 147 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
152 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, 148 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
153 linkctl)) { 149 linkctl)) {
154 err = -ENODEV; 150 err = -ENODEV;
155 mlx4_err(dev, "Couldn't restore HCA PCI Express " 151 mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
156 "Link control register, aborting.\n");
157 goto out; 152 goto out;
158 } 153 }
159 } 154 }
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
164 159
165 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { 160 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
166 err = -ENODEV; 161 err = -ENODEV;
167 mlx4_err(dev, "Couldn't restore HCA reg %x, " 162 mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
168 "aborting.\n", i); 163 i);
169 goto out; 164 goto out;
170 } 165 }
171 } 166 }
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
173 if (pci_write_config_dword(dev->pdev, PCI_COMMAND, 168 if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
174 hca_header[PCI_COMMAND / 4])) { 169 hca_header[PCI_COMMAND / 4])) {
175 err = -ENODEV; 170 err = -ENODEV;
176 mlx4_err(dev, "Couldn't restore HCA COMMAND, " 171 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
177 "aborting.\n");
178 goto out; 172 goto out;
179 } 173 }
180 174
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2ba3b7623960..0efc1368e5a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -279,7 +279,7 @@ enum qp_transition {
279}; 279};
280 280
281/* For Debug uses */ 281/* For Debug uses */
282static const char *ResourceType(enum mlx4_resource rt) 282static const char *resource_str(enum mlx4_resource rt)
283{ 283{
284 switch (rt) { 284 switch (rt) {
285 case RES_QP: return "RES_QP"; 285 case RES_QP: return "RES_QP";
@@ -307,6 +307,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
307 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 307 &priv->mfunc.master.res_tracker.res_alloc[res_type];
308 int err = -EINVAL; 308 int err = -EINVAL;
309 int allocated, free, reserved, guaranteed, from_free; 309 int allocated, free, reserved, guaranteed, from_free;
310 int from_rsvd;
310 311
311 if (slave > dev->num_vfs) 312 if (slave > dev->num_vfs)
312 return -EINVAL; 313 return -EINVAL;
@@ -321,11 +322,16 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
321 res_alloc->res_reserved; 322 res_alloc->res_reserved;
322 guaranteed = res_alloc->guaranteed[slave]; 323 guaranteed = res_alloc->guaranteed[slave];
323 324
324 if (allocated + count > res_alloc->quota[slave]) 325 if (allocated + count > res_alloc->quota[slave]) {
326 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
327 slave, port, resource_str(res_type), count,
328 allocated, res_alloc->quota[slave]);
325 goto out; 329 goto out;
330 }
326 331
327 if (allocated + count <= guaranteed) { 332 if (allocated + count <= guaranteed) {
328 err = 0; 333 err = 0;
334 from_rsvd = count;
329 } else { 335 } else {
330 /* portion may need to be obtained from free area */ 336 /* portion may need to be obtained from free area */
331 if (guaranteed - allocated > 0) 337 if (guaranteed - allocated > 0)
@@ -333,8 +339,14 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
333 else 339 else
334 from_free = count; 340 from_free = count;
335 341
336 if (free - from_free > reserved) 342 from_rsvd = count - from_free;
343
344 if (free - from_free >= reserved)
337 err = 0; 345 err = 0;
346 else
347 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
348 slave, port, resource_str(res_type), free,
349 from_free, reserved);
338 } 350 }
339 351
340 if (!err) { 352 if (!err) {
@@ -342,9 +354,11 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
342 if (port > 0) { 354 if (port > 0) {
343 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; 355 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
344 res_alloc->res_port_free[port - 1] -= count; 356 res_alloc->res_port_free[port - 1] -= count;
357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
345 } else { 358 } else {
346 res_alloc->allocated[slave] += count; 359 res_alloc->allocated[slave] += count;
347 res_alloc->res_free -= count; 360 res_alloc->res_free -= count;
361 res_alloc->res_reserved -= from_rsvd;
348 } 362 }
349 } 363 }
350 364
@@ -360,17 +374,36 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
360 struct mlx4_priv *priv = mlx4_priv(dev); 374 struct mlx4_priv *priv = mlx4_priv(dev);
361 struct resource_allocator *res_alloc = 375 struct resource_allocator *res_alloc =
362 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 376 &priv->mfunc.master.res_tracker.res_alloc[res_type];
377 int allocated, guaranteed, from_rsvd;
363 378
364 if (slave > dev->num_vfs) 379 if (slave > dev->num_vfs)
365 return; 380 return;
366 381
367 spin_lock(&res_alloc->alloc_lock); 382 spin_lock(&res_alloc->alloc_lock);
383
384 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
386 res_alloc->allocated[slave];
387 guaranteed = res_alloc->guaranteed[slave];
388
389 if (allocated - count >= guaranteed) {
390 from_rsvd = 0;
391 } else {
392 /* portion may need to be returned to reserved area */
393 if (allocated - guaranteed > 0)
394 from_rsvd = count - (allocated - guaranteed);
395 else
396 from_rsvd = count;
397 }
398
368 if (port > 0) { 399 if (port > 0) {
369 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; 400 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
370 res_alloc->res_port_free[port - 1] += count; 401 res_alloc->res_port_free[port - 1] += count;
402 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
371 } else { 403 } else {
372 res_alloc->allocated[slave] -= count; 404 res_alloc->allocated[slave] -= count;
373 res_alloc->res_free += count; 405 res_alloc->res_free += count;
406 res_alloc->res_reserved += from_rsvd;
374 } 407 }
375 408
376 spin_unlock(&res_alloc->alloc_lock); 409 spin_unlock(&res_alloc->alloc_lock);
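
The two resource_tracker.c hunks above add reserved-pool accounting: when a grant is partly or fully covered by a slave's guaranteed share, that portion (from_rsvd) is taken out of the reserved counter as well as the free counter, and on release it is returned to the right place; the quota and free-pool failure paths also gain warnings naming the slave, port, and resource. A simplified single-resource sketch of the arithmetic, assuming the reserved share is counted as a subset of free (field and function names are illustrative, and locking is omitted):

#include <stdio.h>

struct pool {
	int allocated;	/* currently held by this slave */
	int guaranteed;	/* this slave's reserved share */
	int free;	/* shared free pool, including unclaimed reserves */
	int reserved;	/* sum of unclaimed guaranteed shares */
};

static int grant(struct pool *p, int count)
{
	int from_free, from_rsvd;

	if (p->allocated + count <= p->guaranteed) {
		from_rsvd = count;	/* fully covered by the guarantee */
	} else {
		from_free = (p->guaranteed > p->allocated) ?
			count - (p->guaranteed - p->allocated) : count;
		from_rsvd = count - from_free;
		/* the non-guaranteed portion must not eat others' reserves */
		if (p->free - from_free < p->reserved)
			return -1;
	}
	p->allocated += count;
	p->free -= count;		/* reserved is a subset of free */
	p->reserved -= from_rsvd;
	return 0;
}

static void release(struct pool *p, int count)
{
	int from_rsvd;

	if (p->allocated - count >= p->guaranteed)
		from_rsvd = 0;		/* all of it came from the free pool */
	else if (p->allocated > p->guaranteed)
		from_rsvd = count - (p->allocated - p->guaranteed);
	else
		from_rsvd = count;	/* all of it returns to the reserve */

	p->allocated -= count;
	p->free += count;
	p->reserved += from_rsvd;
}

int main(void)
{
	struct pool p = { .allocated = 0, .guaranteed = 4,
			  .free = 16, .reserved = 12 };

	if (grant(&p, 6))	/* 4 from the guarantee, 2 from shared free */
		return 1;
	release(&p, 6);
	printf("free %d reserved %d\n", p.free, p.reserved);	/* 16 12 */
	return 0;
}
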
@@ -963,7 +996,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
963 ret = alloc_srq_tr(id); 996 ret = alloc_srq_tr(id);
964 break; 997 break;
965 case RES_MAC: 998 case RES_MAC:
966 printk(KERN_ERR "implementation missing\n"); 999 pr_err("implementation missing\n");
967 return NULL; 1000 return NULL;
968 case RES_COUNTER: 1001 case RES_COUNTER:
969 ret = alloc_counter_tr(id); 1002 ret = alloc_counter_tr(id);
@@ -1057,10 +1090,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
1057{ 1090{
1058 if (res->com.state == RES_MTT_BUSY || 1091 if (res->com.state == RES_MTT_BUSY ||
1059 atomic_read(&res->ref_count)) { 1092 atomic_read(&res->ref_count)) {
1060 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", 1093 pr_devel("%s-%d: state %s, ref_count %d\n",
1061 __func__, __LINE__, 1094 __func__, __LINE__,
1062 mtt_states_str(res->com.state), 1095 mtt_states_str(res->com.state),
1063 atomic_read(&res->ref_count)); 1096 atomic_read(&res->ref_count));
1064 return -EBUSY; 1097 return -EBUSY;
1065 } else if (res->com.state != RES_MTT_ALLOCATED) 1098 } else if (res->com.state != RES_MTT_ALLOCATED)
1066 return -EPERM; 1099 return -EPERM;
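
Worth noting in the hunk above: printk(KERN_DEBUG ...) is always compiled in, while pr_devel() reduces to a no-op unless the translation unit defines DEBUG, so the remove_mtt_ok() diagnostic becomes a development-only message. A rough userspace analogue (the kernel's no-op variant still type-checks the format string; this sketch simply discards the call):

#include <stdio.h>

#ifdef DEBUG
#define pr_devel(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) do { } while (0)	/* nothing emitted */
#endif

int main(void)
{
	pr_devel("state %s, ref_count %d\n", "RES_MTT_BUSY", 1);
	return 0;
}
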
@@ -3897,7 +3930,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
3897 } 3930 }
3898 } 3931 }
3899 if (!be_mac) { 3932 if (!be_mac) {
3900 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", 3933 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
3901 port); 3934 port);
3902 return -EINVAL; 3935 return -EINVAL;
3903 } 3936 }
@@ -3994,7 +4027,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3994 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 4027 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3995 err = get_res(dev, slave, qpn, RES_QP, &rqp); 4028 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3996 if (err) { 4029 if (err) {
3997 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); 4030 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
3998 return err; 4031 return err;
3999 } 4032 }
4000 rule_header = (struct _rule_hw *)(ctrl + 1); 4033 rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -4012,7 +4045,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4012 case MLX4_NET_TRANS_RULE_ID_IPV4: 4045 case MLX4_NET_TRANS_RULE_ID_IPV4:
4013 case MLX4_NET_TRANS_RULE_ID_TCP: 4046 case MLX4_NET_TRANS_RULE_ID_TCP:
4014 case MLX4_NET_TRANS_RULE_ID_UDP: 4047 case MLX4_NET_TRANS_RULE_ID_UDP:
4015 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); 4048 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4016 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { 4049 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4017 err = -EINVAL; 4050 err = -EINVAL;
4018 goto err_put; 4051 goto err_put;
@@ -4021,7 +4054,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4021 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; 4054 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4022 break; 4055 break;
4023 default: 4056 default:
4024 pr_err("Corrupted mailbox.\n"); 4057 pr_err("Corrupted mailbox\n");
4025 err = -EINVAL; 4058 err = -EINVAL;
4026 goto err_put; 4059 goto err_put;
4027 } 4060 }
@@ -4035,7 +4068,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4035 4068
4036 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); 4069 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4037 if (err) { 4070 if (err) {
4038 mlx4_err(dev, "Fail to add flow steering resources.\n "); 4071 mlx4_err(dev, "Fail to add flow steering resources\n");
4039 /* detach rule*/ 4072 /* detach rule*/
4040 mlx4_cmd(dev, vhcr->out_param, 0, 0, 4073 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4041 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 4074 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -4073,7 +4106,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4073 4106
4074 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); 4107 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4075 if (err) { 4108 if (err) {
4076 mlx4_err(dev, "Fail to remove flow steering resources.\n "); 4109 mlx4_err(dev, "Fail to remove flow steering resources\n");
4077 goto out; 4110 goto out;
4078 } 4111 }
4079 4112
@@ -4151,7 +4184,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
4151 if (print) 4184 if (print)
4152 mlx4_dbg(dev, 4185 mlx4_dbg(dev,
4153 "%s id 0x%llx is busy\n", 4186 "%s id 0x%llx is busy\n",
4154 ResourceType(type), 4187 resource_str(type),
4155 r->res_id); 4188 r->res_id);
4156 ++busy; 4189 ++busy;
4157 } else { 4190 } else {
@@ -4202,8 +4235,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4202 4235
4203 err = move_all_busy(dev, slave, RES_QP); 4236 err = move_all_busy(dev, slave, RES_QP);
4204 if (err) 4237 if (err)
4205 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" 4238 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4206 "for slave %d\n", slave); 4239 slave);
4207 4240
4208 spin_lock_irq(mlx4_tlock(dev)); 4241 spin_lock_irq(mlx4_tlock(dev));
4209 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { 4242 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4241,10 +4274,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4241 MLX4_CMD_TIME_CLASS_A, 4274 MLX4_CMD_TIME_CLASS_A,
4242 MLX4_CMD_NATIVE); 4275 MLX4_CMD_NATIVE);
4243 if (err) 4276 if (err)
4244 mlx4_dbg(dev, "rem_slave_qps: failed" 4277 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4245 " to move slave %d qpn %d to" 4278 slave, qp->local_qpn);
4246 " reset\n", slave,
4247 qp->local_qpn);
4248 atomic_dec(&qp->rcq->ref_count); 4279 atomic_dec(&qp->rcq->ref_count);
4249 atomic_dec(&qp->scq->ref_count); 4280 atomic_dec(&qp->scq->ref_count);
4250 atomic_dec(&qp->mtt->ref_count); 4281 atomic_dec(&qp->mtt->ref_count);
@@ -4278,8 +4309,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4278 4309
4279 err = move_all_busy(dev, slave, RES_SRQ); 4310 err = move_all_busy(dev, slave, RES_SRQ);
4280 if (err) 4311 if (err)
4281 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " 4312 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4282 "busy for slave %d\n", slave); 4313 slave);
4283 4314
4284 spin_lock_irq(mlx4_tlock(dev)); 4315 spin_lock_irq(mlx4_tlock(dev));
4285 list_for_each_entry_safe(srq, tmp, srq_list, com.list) { 4316 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4309,9 +4340,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4309 MLX4_CMD_TIME_CLASS_A, 4340 MLX4_CMD_TIME_CLASS_A,
4310 MLX4_CMD_NATIVE); 4341 MLX4_CMD_NATIVE);
4311 if (err) 4342 if (err)
4312 mlx4_dbg(dev, "rem_slave_srqs: failed" 4343 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4313 " to move slave %d srq %d to"
4314 " SW ownership\n",
4315 slave, srqn); 4344 slave, srqn);
4316 4345
4317 atomic_dec(&srq->mtt->ref_count); 4346 atomic_dec(&srq->mtt->ref_count);
@@ -4346,8 +4375,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4346 4375
4347 err = move_all_busy(dev, slave, RES_CQ); 4376 err = move_all_busy(dev, slave, RES_CQ);
4348 if (err) 4377 if (err)
4349 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " 4378 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4350 "busy for slave %d\n", slave); 4379 slave);
4351 4380
4352 spin_lock_irq(mlx4_tlock(dev)); 4381 spin_lock_irq(mlx4_tlock(dev));
4353 list_for_each_entry_safe(cq, tmp, cq_list, com.list) { 4382 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4377,9 +4406,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4377 MLX4_CMD_TIME_CLASS_A, 4406 MLX4_CMD_TIME_CLASS_A,
4378 MLX4_CMD_NATIVE); 4407 MLX4_CMD_NATIVE);
4379 if (err) 4408 if (err)
4380 mlx4_dbg(dev, "rem_slave_cqs: failed" 4409 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4381 " to move slave %d cq %d to"
4382 " SW ownership\n",
4383 slave, cqn); 4410 slave, cqn);
4384 atomic_dec(&cq->mtt->ref_count); 4411 atomic_dec(&cq->mtt->ref_count);
4385 state = RES_CQ_ALLOCATED; 4412 state = RES_CQ_ALLOCATED;
@@ -4411,8 +4438,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4411 4438
4412 err = move_all_busy(dev, slave, RES_MPT); 4439 err = move_all_busy(dev, slave, RES_MPT);
4413 if (err) 4440 if (err)
4414 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " 4441 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4415 "busy for slave %d\n", slave); 4442 slave);
4416 4443
4417 spin_lock_irq(mlx4_tlock(dev)); 4444 spin_lock_irq(mlx4_tlock(dev));
4418 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { 4445 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4447,9 +4474,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4447 MLX4_CMD_TIME_CLASS_A, 4474 MLX4_CMD_TIME_CLASS_A,
4448 MLX4_CMD_NATIVE); 4475 MLX4_CMD_NATIVE);
4449 if (err) 4476 if (err)
4450 mlx4_dbg(dev, "rem_slave_mrs: failed" 4477 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4451 " to move slave %d mpt %d to"
4452 " SW ownership\n",
4453 slave, mptn); 4478 slave, mptn);
4454 if (mpt->mtt) 4479 if (mpt->mtt)
4455 atomic_dec(&mpt->mtt->ref_count); 4480 atomic_dec(&mpt->mtt->ref_count);
@@ -4481,8 +4506,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4481 4506
4482 err = move_all_busy(dev, slave, RES_MTT); 4507 err = move_all_busy(dev, slave, RES_MTT);
4483 if (err) 4508 if (err)
4484 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " 4509 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4485 "busy for slave %d\n", slave); 4510 slave);
4486 4511
4487 spin_lock_irq(mlx4_tlock(dev)); 4512 spin_lock_irq(mlx4_tlock(dev));
4488 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { 4513 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4584,8 +4609,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4584 4609
4585 err = move_all_busy(dev, slave, RES_EQ); 4610 err = move_all_busy(dev, slave, RES_EQ);
4586 if (err) 4611 if (err)
4587 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " 4612 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4588 "busy for slave %d\n", slave); 4613 slave);
4589 4614
4590 spin_lock_irq(mlx4_tlock(dev)); 4615 spin_lock_irq(mlx4_tlock(dev));
4591 list_for_each_entry_safe(eq, tmp, eq_list, com.list) { 4616 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4617,9 +4642,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4617 MLX4_CMD_TIME_CLASS_A, 4642 MLX4_CMD_TIME_CLASS_A,
4618 MLX4_CMD_NATIVE); 4643 MLX4_CMD_NATIVE);
4619 if (err) 4644 if (err)
4620 mlx4_dbg(dev, "rem_slave_eqs: failed" 4645 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4621 " to move slave %d eqs %d to" 4646 slave, eqn);
4622 " SW ownership\n", slave, eqn);
4623 mlx4_free_cmd_mailbox(dev, mailbox); 4647 mlx4_free_cmd_mailbox(dev, mailbox);
4624 atomic_dec(&eq->mtt->ref_count); 4648 atomic_dec(&eq->mtt->ref_count);
4625 state = RES_EQ_RESERVED; 4649 state = RES_EQ_RESERVED;
@@ -4648,8 +4672,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4648 4672
4649 err = move_all_busy(dev, slave, RES_COUNTER); 4673 err = move_all_busy(dev, slave, RES_COUNTER);
4650 if (err) 4674 if (err)
4651 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " 4675 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4652 "busy for slave %d\n", slave); 4676 slave);
4653 4677
4654 spin_lock_irq(mlx4_tlock(dev)); 4678 spin_lock_irq(mlx4_tlock(dev));
4655 list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 4679 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4679,8 +4703,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4679 4703
4680 err = move_all_busy(dev, slave, RES_XRCD); 4704 err = move_all_busy(dev, slave, RES_XRCD);
4681 if (err) 4705 if (err)
4682 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " 4706 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4683 "busy for slave %d\n", slave); 4707 slave);
4684 4708
4685 spin_lock_irq(mlx4_tlock(dev)); 4709 spin_lock_irq(mlx4_tlock(dev));
4686 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { 4710 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4825,10 +4849,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4825 0, MLX4_CMD_UPDATE_QP, 4849 0, MLX4_CMD_UPDATE_QP,
4826 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 4850 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4827 if (err) { 4851 if (err) {
4828 mlx4_info(dev, "UPDATE_QP failed for slave %d, " 4852 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4829 "port %d, qpn %d (%d)\n", 4853 work->slave, port, qp->local_qpn, err);
4830 work->slave, port, qp->local_qpn,
4831 err);
4832 errors++; 4854 errors++;
4833 } 4855 }
4834 } 4856 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fbcd0ad..87d1b018a9c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
620 mlx5_command_str(msg_to_opcode(ent->in)), 620 mlx5_command_str(msg_to_opcode(ent->in)),
621 msg_to_opcode(ent->in)); 621 msg_to_opcode(ent->in));
622 } 622 }
623 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, 623 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
624 deliv_status_to_str(ent->status), ent->status); 624 err, deliv_status_to_str(ent->status), ent->status);
625 625
626 return err; 626 return err;
627} 627}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b286b2c..7f39ebcd6ad0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
208 */ 208 */
209 rmb(); 209 rmb();
210 210
211 mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); 211 mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
212 eq->eqn, eqe_type_str(eqe->type));
212 switch (eqe->type) { 213 switch (eqe->type) {
213 case MLX5_EVENT_TYPE_COMP: 214 case MLX5_EVENT_TYPE_COMP:
214 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; 215 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
270 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); 271 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
271 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); 272 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
272 273
273 mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); 274 mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
275 func_id, npages);
274 mlx5_core_req_pages_handler(dev, func_id, npages); 276 mlx5_core_req_pages_handler(dev, func_id, npages);
275 } 277 }
276 break; 278 break;
277 279
278 280
279 default: 281 default:
280 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); 282 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
283 eqe->type, eq->eqn);
281 break; 284 break;
282 } 285 }
283 286
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f70051..ee24f132e319 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
66 66
67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
68 if (err) { 68 if (err) {
69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
71 if (err) { 71 if (err) {
72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
73 return err; 73 return err;
74 } 74 }
75 } 75 }
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
78 if (err) { 78 if (err) {
79 dev_warn(&pdev->dev, 79 dev_warn(&pdev->dev,
80 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n"); 80 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
82 if (err) { 82 if (err) {
83 dev_err(&pdev->dev, 83 dev_err(&pdev->dev,
84 "Can't set consistent PCI DMA mask, aborting.\n"); 84 "Can't set consistent PCI DMA mask, aborting\n");
85 return err; 85 return err;
86 } 86 }
87 } 87 }
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
95 int err = 0; 95 int err = 0;
96 96
97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
98 dev_err(&pdev->dev, "Missing registers BAR, aborting.\n"); 98 dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
99 return -ENODEV; 99 return -ENODEV;
100 } 100 }
101 101
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
319 319
320 err = pci_enable_device(pdev); 320 err = pci_enable_device(pdev);
321 if (err) { 321 if (err) {
322 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); 322 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
323 goto err_dbg; 323 goto err_dbg;
324 } 324 }
325 325
326 err = request_bar(pdev); 326 err = request_bar(pdev);
327 if (err) { 327 if (err) {
328 dev_err(&pdev->dev, "error requesting BARs, aborting.\n"); 328 dev_err(&pdev->dev, "error requesting BARs, aborting\n");
329 goto err_disable; 329 goto err_disable;
330 } 330 }
331 331
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1ae1b0..f0c9f9a7a361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
39 39
40extern int mlx5_core_debug_mask; 40extern int mlx5_core_debug_mask;
41 41
42#define mlx5_core_dbg(dev, format, arg...) \ 42#define mlx5_core_dbg(dev, format, ...) \
43pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 43 pr_debug("%s:%s:%d:(pid %d): " format, \
44 current->pid, ##arg) 44 (dev)->priv.name, __func__, __LINE__, current->pid, \
45 ##__VA_ARGS__)
45 46
46#define mlx5_core_dbg_mask(dev, mask, format, arg...) \ 47#define mlx5_core_dbg_mask(dev, mask, format, ...) \
47do { \ 48do { \
48 if ((mask) & mlx5_core_debug_mask) \ 49 if ((mask) & mlx5_core_debug_mask) \
49 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \ 50 mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
50 __func__, __LINE__, current->pid, ##arg); \
51} while (0) 51} while (0)
52 52
53#define mlx5_core_err(dev, format, arg...) \ 53#define mlx5_core_err(dev, format, ...) \
54pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 54 pr_err("%s:%s:%d:(pid %d): " format, \
55 current->pid, ##arg) 55 (dev)->priv.name, __func__, __LINE__, current->pid, \
56 ##__VA_ARGS__)
56 57
57#define mlx5_core_warn(dev, format, arg...) \ 58#define mlx5_core_warn(dev, format, ...) \
58pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 59 pr_warn("%s:%s:%d:(pid %d): " format, \
59 current->pid, ##arg) 60 (dev)->priv.name, __func__, __LINE__, current->pid, \
61 ##__VA_ARGS__)
60 62
61enum { 63enum {
62 MLX5_CMD_DATA, /* print command payload only */ 64 MLX5_CMD_DATA, /* print command payload only */
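
The mlx5_core.h hunk also removes duplication: mlx5_core_dbg_mask() now expands to mlx5_core_dbg() instead of repeating its pr_debug() body, so the "%s:%s:%d:(pid %d)" prefix lives in exactly one place. A sketch of the gated-delegation pattern, with debug_mask and dbg() as illustrative stand-ins:

#include <stdio.h>

static int debug_mask = 0x2;	/* assumed runtime-settable mask */

#define dbg(fmt, ...) \
	fprintf(stderr, "%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)

/* The mask-gated variant reuses dbg() rather than duplicating its body. */
#define dbg_mask(mask, fmt, ...)		\
do {						\
	if ((mask) & debug_mask)		\
		dbg(fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	dbg_mask(0x2, "printed: mask bit set (npages %d)\n", 4);
	dbg_mask(0x4, "suppressed: mask bit clear\n");
	return 0;
}
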
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ac52a0fe2d3a..ba0401d4af50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
73 } 73 }
74 74
75 if (err) { 75 if (err) {
76 mlx5_core_dbg(dev, "cmd exec faile %d\n", err); 76 mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
77 return err; 77 return err;
78 } 78 }
79 79
@@ -195,7 +195,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
195 } 195 }
196 196
197 if (out.hdr.status) { 197 if (out.hdr.status) {
198 mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status); 198 mlx5_core_err(dev, "create_psv bad status %d\n",
199 out.hdr.status);
199 return mlx5_cmd_status_to_err(&out.hdr); 200 return mlx5_cmd_status_to_err(&out.hdr);
200 } 201 }
201 202
@@ -224,7 +225,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
224 } 225 }
225 226
226 if (out.hdr.status) { 227 if (out.hdr.status) {
227 mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status); 228 mlx5_core_err(dev, "destroy_psv bad status %d\n",
229 out.hdr.status);
228 err = mlx5_cmd_status_to_err(&out.hdr); 230 err = mlx5_cmd_status_to_err(&out.hdr);
229 goto out; 231 goto out;
230 } 232 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a82bc3..c2a953ef0e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
311 in->num_entries = cpu_to_be32(npages); 311 in->num_entries = cpu_to_be32(npages);
312 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 312 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
313 if (err) { 313 if (err) {
314 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); 314 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
315 func_id, npages, err);
315 goto out_alloc; 316 goto out_alloc;
316 } 317 }
317 dev->priv.fw_pages += npages; 318 dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
319 if (out.hdr.status) { 320 if (out.hdr.status) {
320 err = mlx5_cmd_status_to_err(&out.hdr); 321 err = mlx5_cmd_status_to_err(&out.hdr);
321 if (err) { 322 if (err) {
322 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); 323 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
324 func_id, npages, out.hdr.status);
323 goto out_alloc; 325 goto out_alloc;
324 } 326 }
325 } 327 }
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
378 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 380 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
379 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 381 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
380 if (err) { 382 if (err) {
381 mlx5_core_err(dev, "failed recliaming pages\n"); 383 mlx5_core_err(dev, "failed reclaiming pages\n");
382 goto out_free; 384 goto out_free;
383 } 385 }
384 dev->priv.fw_pages -= npages; 386 dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
414 err = give_pages(dev, req->func_id, req->npages, 1); 416 err = give_pages(dev, req->func_id, req->npages, 1);
415 417
416 if (err) 418 if (err)
417 mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? 419 mlx5_core_warn(dev, "%s fail %d\n",
418 "reclaim" : "give", err); 420 req->npages < 0 ? "reclaim" : "give", err);
419 421
420 kfree(req); 422 kfree(req);
421} 423}
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
487 optimal_reclaimed_pages(), 489 optimal_reclaimed_pages(),
488 &nclaimed); 490 &nclaimed);
489 if (err) { 491 if (err) {
490 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); 492 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
493 err);
491 return err; 494 return err;
492 } 495 }
493 if (nclaimed) 496 if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 510576213dd0..8145b4668229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
79 79
80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
81 if (err) { 81 if (err) {
82 mlx5_core_warn(dev, "ret %d", err); 82 mlx5_core_warn(dev, "ret %d\n", err);
83 return err; 83 return err;
84 } 84 }
85 85
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
96 err = radix_tree_insert(&table->tree, qp->qpn, qp); 96 err = radix_tree_insert(&table->tree, qp->qpn, qp);
97 spin_unlock_irq(&table->lock); 97 spin_unlock_irq(&table->lock);
98 if (err) { 98 if (err) {
99 mlx5_core_warn(dev, "err %d", err); 99 mlx5_core_warn(dev, "err %d\n", err);
100 goto err_cmd; 100 goto err_cmd;
101 } 101 }
102 102
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 16435b3cfa9f..6c7c78baedca 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
1504 if (ksp->phyiface_regs && ksp->link_irq == -1) { 1504 if (ksp->phyiface_regs && ksp->link_irq == -1) {
1505 ks8695_init_switch(ksp); 1505 ks8695_init_switch(ksp);
1506 ksp->dtype = KS8695_DTYPE_LAN; 1506 ksp->dtype = KS8695_DTYPE_LAN;
1507 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); 1507 ndev->ethtool_ops = &ks8695_ethtool_ops;
1508 } else if (ksp->phyiface_regs && ksp->link_irq != -1) { 1508 } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
1509 ks8695_init_wan_phy(ksp); 1509 ks8695_init_wan_phy(ksp);
1510 ksp->dtype = KS8695_DTYPE_WAN; 1510 ksp->dtype = KS8695_DTYPE_WAN;
1511 SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops); 1511 ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
1512 } else { 1512 } else {
1513 /* No initialisation since HPNA does not have a PHY */ 1513 /* No initialisation since HPNA does not have a PHY */
1514 ksp->dtype = KS8695_DTYPE_HPNA; 1514 ksp->dtype = KS8695_DTYPE_HPNA;
1515 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); 1515 ndev->ethtool_ops = &ks8695_ethtool_ops;
1516 } 1516 }
1517 1517
1518 /* And bring up the net_device with the net core */ 1518 /* And bring up the net_device with the net core */
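
[The conversion above recurs throughout this series: SET_ETHTOOL_OPS() becomes a direct pointer assignment. In netdevice.h of this era the macro was essentially a one-line wrapper around that assignment, so the two spellings generate the same store; a sketch:]

    /* The wrapper being removed was, roughly: */
    #define SET_ETHTOOL_OPS(netdev, ops)    ((netdev)->ethtool_ops = (ops))

    /* so these two statements are equivalent: */
    SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
    ndev->ethtool_ops = &ks8695_ethtool_ops;
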
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e0c92e0e5e1d..66d4ab703f45 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -26,6 +26,8 @@
26#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
27 27
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/gpio.h>
30#include <linux/of_gpio.h>
29 31
30#include "ks8851.h" 32#include "ks8851.h"
31 33
@@ -85,6 +87,8 @@ union ks8851_tx_hdr {
85 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom 87 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
86 * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. 88 * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
87 * @vdd_reg: Optional regulator supplying the chip 89 * @vdd_reg: Optional regulator supplying the chip
90 * @vdd_io: Optional digital power supply for IO
91 * @gpio: Optional reset_n gpio
88 * 92 *
89 * The @lock ensures that the chip is protected when certain operations are 93 * The @lock ensures that the chip is protected when certain operations are
90 * in progress. When the read or write packet transfer is in progress, most 94 * in progress. When the read or write packet transfer is in progress, most
@@ -133,6 +137,8 @@ struct ks8851_net {
133 137
134 struct eeprom_93cx6 eeprom; 138 struct eeprom_93cx6 eeprom;
135 struct regulator *vdd_reg; 139 struct regulator *vdd_reg;
140 struct regulator *vdd_io;
141 int gpio;
136}; 142};
137 143
138static int msg_enable; 144static int msg_enable;
@@ -1404,6 +1410,7 @@ static int ks8851_probe(struct spi_device *spi)
1404 struct ks8851_net *ks; 1410 struct ks8851_net *ks;
1405 int ret; 1411 int ret;
1406 unsigned cider; 1412 unsigned cider;
1413 int gpio;
1407 1414
1408 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1415 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1409 if (!ndev) 1416 if (!ndev)
@@ -1417,20 +1424,53 @@ static int ks8851_probe(struct spi_device *spi)
1417 ks->spidev = spi; 1424 ks->spidev = spi;
1418 ks->tx_space = 6144; 1425 ks->tx_space = 6144;
1419 1426
1420 ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd"); 1427 gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
1421 if (IS_ERR(ks->vdd_reg)) { 1428 0, NULL);
1422 ret = PTR_ERR(ks->vdd_reg); 1429 if (gpio == -EPROBE_DEFER) {
1423 if (ret == -EPROBE_DEFER) 1430 ret = gpio;
1424 goto err_reg; 1431 goto err_gpio;
1425 } else { 1432 }
1426 ret = regulator_enable(ks->vdd_reg); 1433
1434 ks->gpio = gpio;
1435 if (gpio_is_valid(gpio)) {
1436 ret = devm_gpio_request_one(&spi->dev, gpio,
1437 GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
1427 if (ret) { 1438 if (ret) {
1428 dev_err(&spi->dev, "regulator enable fail: %d\n", 1439 dev_err(&spi->dev, "reset gpio request failed\n");
1429 ret); 1440 goto err_gpio;
1430 goto err_reg_en;
1431 } 1441 }
1432 } 1442 }
1433 1443
1444 ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
1445 if (IS_ERR(ks->vdd_io)) {
1446 ret = PTR_ERR(ks->vdd_io);
1447 goto err_reg_io;
1448 }
1449
1450 ret = regulator_enable(ks->vdd_io);
1451 if (ret) {
1452 dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
1453 ret);
1454 goto err_reg_io;
1455 }
1456
1457 ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
1458 if (IS_ERR(ks->vdd_reg)) {
1459 ret = PTR_ERR(ks->vdd_reg);
1460 goto err_reg;
1461 }
1462
1463 ret = regulator_enable(ks->vdd_reg);
1464 if (ret) {
1465 dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
1466 ret);
1467 goto err_reg;
1468 }
1469
1470 if (gpio_is_valid(gpio)) {
1471 usleep_range(10000, 11000);
1472 gpio_set_value(gpio, 1);
1473 }
1434 1474
1435 mutex_init(&ks->lock); 1475 mutex_init(&ks->lock);
1436 spin_lock_init(&ks->statelock); 1476 spin_lock_init(&ks->statelock);
@@ -1471,7 +1511,7 @@ static int ks8851_probe(struct spi_device *spi)
1471 1511
1472 skb_queue_head_init(&ks->txq); 1512 skb_queue_head_init(&ks->txq);
1473 1513
1474 SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops); 1514 ndev->ethtool_ops = &ks8851_ethtool_ops;
1475 SET_NETDEV_DEV(ndev, &spi->dev); 1515 SET_NETDEV_DEV(ndev, &spi->dev);
1476 1516
1477 spi_set_drvdata(spi, ks); 1517 spi_set_drvdata(spi, ks);
@@ -1527,13 +1567,14 @@ err_netdev:
1527 free_irq(ndev->irq, ks); 1567 free_irq(ndev->irq, ks);
1528 1568
1529err_irq: 1569err_irq:
1570 if (gpio_is_valid(gpio))
1571 gpio_set_value(gpio, 0);
1530err_id: 1572err_id:
1531 if (!IS_ERR(ks->vdd_reg)) 1573 regulator_disable(ks->vdd_reg);
1532 regulator_disable(ks->vdd_reg);
1533err_reg_en:
1534 if (!IS_ERR(ks->vdd_reg))
1535 regulator_put(ks->vdd_reg);
1536err_reg: 1574err_reg:
1575 regulator_disable(ks->vdd_io);
1576err_reg_io:
1577err_gpio:
1537 free_netdev(ndev); 1578 free_netdev(ndev);
1538 return ret; 1579 return ret;
1539} 1580}
@@ -1547,18 +1588,24 @@ static int ks8851_remove(struct spi_device *spi)
1547 1588
1548 unregister_netdev(priv->netdev); 1589 unregister_netdev(priv->netdev);
1549 free_irq(spi->irq, priv); 1590 free_irq(spi->irq, priv);
1550 if (!IS_ERR(priv->vdd_reg)) { 1591 if (gpio_is_valid(priv->gpio))
1551 regulator_disable(priv->vdd_reg); 1592 gpio_set_value(priv->gpio, 0);
1552 regulator_put(priv->vdd_reg); 1593 regulator_disable(priv->vdd_reg);
1553 } 1594 regulator_disable(priv->vdd_io);
1554 free_netdev(priv->netdev); 1595 free_netdev(priv->netdev);
1555 1596
1556 return 0; 1597 return 0;
1557} 1598}
1558 1599
1600static const struct of_device_id ks8851_match_table[] = {
1601 { .compatible = "micrel,ks8851" },
1602 { }
1603};
1604
1559static struct spi_driver ks8851_driver = { 1605static struct spi_driver ks8851_driver = {
1560 .driver = { 1606 .driver = {
1561 .name = "ks8851", 1607 .name = "ks8851",
1608 .of_match_table = ks8851_match_table,
1562 .owner = THIS_MODULE, 1609 .owner = THIS_MODULE,
1563 .pm = &ks8851_pm_ops, 1610 .pm = &ks8851_pm_ops,
1564 }, 1611 },
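
[The ks8851 probe rework above sequences power-up as: hold the optional reset GPIO low, enable the vdd-io rail, then vdd, wait roughly 10 ms for the supplies to settle, and only then release reset; the error labels unwind the same steps in reverse. A condensed sketch of the happy path, error handling elided and names as in the patch:]

    gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios", 0, NULL);
    if (gpio_is_valid(gpio))                        /* hold the chip in reset */
            devm_gpio_request_one(&spi->dev, gpio,
                                  GPIOF_OUT_INIT_LOW, "ks8851_rst_n");

    regulator_enable(ks->vdd_io);                   /* IO rail first */
    regulator_enable(ks->vdd_reg);                  /* then the core supply */

    if (gpio_is_valid(gpio)) {
            usleep_range(10000, 11000);             /* let the rails settle */
            gpio_set_value(gpio, 1);                /* release reset */
    }
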
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 14ac0e2bc09f..064a48d0c368 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
4930 * Only reset the hardware if time between calls is long 4930 * Only reset the hardware if time between calls is long
4931 * enough. 4931 * enough.
4932 */ 4932 */
4933 if (jiffies - last_reset <= dev->watchdog_timeo) 4933 if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
4934 hw_priv = NULL; 4934 hw_priv = NULL;
4935 } 4935 }
4936 4936
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
7072 dev = alloc_etherdev(sizeof(struct dev_priv)); 7072 dev = alloc_etherdev(sizeof(struct dev_priv));
7073 if (!dev) 7073 if (!dev)
7074 goto pcidev_init_reg_err; 7074 goto pcidev_init_reg_err;
7075 SET_NETDEV_DEV(dev, &pdev->dev);
7075 info->netdev[i] = dev; 7076 info->netdev[i] = dev;
7076 7077
7077 priv = netdev_priv(dev); 7078 priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
7106 } 7107 }
7107 7108
7108 dev->netdev_ops = &netdev_ops; 7109 dev->netdev_ops = &netdev_ops;
7109 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7110 dev->ethtool_ops = &netdev_ethtool_ops;
7110 if (register_netdev(dev)) 7111 if (register_netdev(dev))
7111 goto pcidev_init_reg_err; 7112 goto pcidev_init_reg_err;
7112 port_set_power_saving(port, true); 7113 port_set_power_saving(port, true);
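
[The ksz884x tx_timeout hunk above swaps a raw jiffies comparison for time_before_eq(), which compares through signed subtraction and therefore stays correct across a jiffies wrap; the vxge hunks later in this series make the same fix with time_before(). Sketch of the idiom:]

    #include <linux/jiffies.h>

    /* time_before_eq(a, b) is roughly ((long)((a) - (b)) <= 0),
     * so it tolerates jiffies wrapping where a plain "a <= b" does not. */
    unsigned long deadline = last_reset + dev->watchdog_timeo;

    if (time_before_eq(jiffies, deadline))
            ;       /* too soon: the timeout window has not yet elapsed */
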
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index c7b40aa21f22..b1b5f66b8b69 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
1593 dev->irq = spi->irq; 1593 dev->irq = spi->irq;
1594 dev->netdev_ops = &enc28j60_netdev_ops; 1594 dev->netdev_ops = &enc28j60_netdev_ops;
1595 dev->watchdog_timeo = TX_TIMEOUT; 1595 dev->watchdog_timeo = TX_TIMEOUT;
1596 SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops); 1596 dev->ethtool_ops = &enc28j60_ethtool_ops;
1597 1597
1598 enc28j60_lowpower(priv, true); 1598 enc28j60_lowpower(priv, true);
1599 1599
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 130f6b204efa..f3d5d79f1cd1 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4112 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, 4112 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
4113 (unsigned long)mgp); 4113 (unsigned long)mgp);
4114 4114
4115 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); 4115 netdev->ethtool_ops = &myri10ge_ethtool_ops;
4116 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); 4116 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
4117 status = register_netdev(netdev); 4117 status = register_netdev(netdev);
4118 if (status != 0) { 4118 if (status != 0) {
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 64ec2a437f46..291fba8b9f07 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
927 dev->netdev_ops = &natsemi_netdev_ops; 927 dev->netdev_ops = &natsemi_netdev_ops;
928 dev->watchdog_timeo = TX_TIMEOUT; 928 dev->watchdog_timeo = TX_TIMEOUT;
929 929
930 SET_ETHTOOL_OPS(dev, &ethtool_ops); 930 dev->ethtool_ops = &ethtool_ops;
931 931
932 if (mtu) 932 if (mtu)
933 dev->mtu = mtu; 933 dev->mtu = mtu;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index dbccf1de49ec..19bb8244b9e3 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
2030 pci_dev->subsystem_vendor, pci_dev->subsystem_device); 2030 pci_dev->subsystem_vendor, pci_dev->subsystem_device);
2031 2031
2032 ndev->netdev_ops = &netdev_ops; 2032 ndev->netdev_ops = &netdev_ops;
2033 SET_ETHTOOL_OPS(ndev, &ops); 2033 ndev->ethtool_ops = &ops;
2034 ndev->watchdog_timeo = 5 * HZ; 2034 ndev->watchdog_timeo = 5 * HZ;
2035 pci_set_drvdata(pci_dev, ndev); 2035 pci_set_drvdata(pci_dev, ndev);
2036 2036
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a2844ff322c4..be587647c706 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
534 netif_tx_start_all_queues(sp->dev); 534 netif_tx_start_all_queues(sp->dev);
535} 535}
536 536
537static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
538{
539 if (!sp->config.multiq)
540 sp->mac_control.fifos[fifo_no].queue_state =
541 FIFO_QUEUE_START;
542
543 netif_tx_start_all_queues(sp->dev);
544}
545
546static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp) 537static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
547{ 538{
548 if (!sp->config.multiq) { 539 if (!sp->config.multiq) {
@@ -5369,8 +5360,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5369 ethtool_cmd_speed_set(info, SPEED_10000); 5360 ethtool_cmd_speed_set(info, SPEED_10000);
5370 info->duplex = DUPLEX_FULL; 5361 info->duplex = DUPLEX_FULL;
5371 } else { 5362 } else {
5372 ethtool_cmd_speed_set(info, -1); 5363 ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
5373 info->duplex = -1; 5364 info->duplex = DUPLEX_UNKNOWN;
5374 } 5365 }
5375 5366
5376 info->autoneg = AUTONEG_DISABLE; 5367 info->autoneg = AUTONEG_DISABLE;
@@ -7919,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7919 7910
7920 /* Driver entry points */ 7911 /* Driver entry points */
7921 dev->netdev_ops = &s2io_netdev_ops; 7912 dev->netdev_ops = &s2io_netdev_ops;
7922 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7913 dev->ethtool_ops = &netdev_ethtool_ops;
7923 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 7914 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7924 NETIF_F_TSO | NETIF_F_TSO6 | 7915 NETIF_F_TSO | NETIF_F_TSO6 |
7925 NETIF_F_RXCSUM | NETIF_F_LRO; 7916 NETIF_F_RXCSUM | NETIF_F_LRO;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 089b713b9f7b..2bbd01fcb9b0 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -120,7 +120,6 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120{ 120{
121 u64 val64; 121 u64 val64;
122 u32 i = 0; 122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
124 123
125 udelay(10); 124 udelay(10);
126 125
@@ -139,7 +138,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
139 mdelay(1); 138 mdelay(1);
140 } while (++i <= max_millis); 139 } while (++i <= max_millis);
141 140
142 return ret; 141 return VXGE_HW_FAIL;
143} 142}
144 143
145static inline enum vxge_hw_status 144static inline enum vxge_hw_status
@@ -1682,12 +1681,10 @@ enum vxge_hw_status vxge_hw_driver_stats_get(
1682 struct __vxge_hw_device *hldev, 1681 struct __vxge_hw_device *hldev,
1683 struct vxge_hw_device_stats_sw_info *sw_stats) 1682 struct vxge_hw_device_stats_sw_info *sw_stats)
1684{ 1683{
1685 enum vxge_hw_status status = VXGE_HW_OK;
1686
1687 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats, 1684 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1688 sizeof(struct vxge_hw_device_stats_sw_info)); 1685 sizeof(struct vxge_hw_device_stats_sw_info));
1689 1686
1690 return status; 1687 return VXGE_HW_OK;
1691} 1688}
1692 1689
1693/* 1690/*
@@ -3228,7 +3225,6 @@ enum vxge_hw_status
3228vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask) 3225vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3229{ 3226{
3230 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 3227 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
3231 enum vxge_hw_status status = VXGE_HW_OK;
3232 int i = 0, j = 0; 3228 int i = 0, j = 0;
3233 3229
3234 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3230 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -3241,7 +3237,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3241 return VXGE_HW_FAIL; 3237 return VXGE_HW_FAIL;
3242 } 3238 }
3243 } 3239 }
3244 return status; 3240 return VXGE_HW_OK;
3245} 3241}
3246/* 3242/*
3247 * vxge_hw_mgmt_reg_Write - Write Titan register. 3243 * vxge_hw_mgmt_reg_Write - Write Titan register.
@@ -3979,7 +3975,6 @@ __vxge_hw_vpath_mgmt_read(
3979{ 3975{
3980 u32 i, mtu = 0, max_pyld = 0; 3976 u32 i, mtu = 0, max_pyld = 0;
3981 u64 val64; 3977 u64 val64;
3982 enum vxge_hw_status status = VXGE_HW_OK;
3983 3978
3984 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { 3979 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3985 3980
@@ -4009,7 +4004,7 @@ __vxge_hw_vpath_mgmt_read(
4009 else 4004 else
4010 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); 4005 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4011 4006
4012 return status; 4007 return VXGE_HW_OK;
4013} 4008}
4014 4009
4015/* 4010/*
@@ -4039,14 +4034,13 @@ static enum vxge_hw_status
4039__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) 4034__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4040{ 4035{
4041 u64 val64; 4036 u64 val64;
4042 enum vxge_hw_status status = VXGE_HW_OK;
4043 4037
4044 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); 4038 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
4045 4039
4046 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 4040 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4047 &hldev->common_reg->cmn_rsthdlr_cfg0); 4041 &hldev->common_reg->cmn_rsthdlr_cfg0);
4048 4042
4049 return status; 4043 return VXGE_HW_OK;
4050} 4044}
4051 4045
4052/* 4046/*
@@ -4227,7 +4221,6 @@ static enum vxge_hw_status
4227__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) 4221__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4228{ 4222{
4229 u64 val64; 4223 u64 val64;
4230 enum vxge_hw_status status = VXGE_HW_OK;
4231 struct __vxge_hw_virtualpath *vpath; 4224 struct __vxge_hw_virtualpath *vpath;
4232 struct vxge_hw_vp_config *vp_config; 4225 struct vxge_hw_vp_config *vp_config;
4233 struct vxge_hw_vpath_reg __iomem *vp_reg; 4226 struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4283,7 +4276,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4283 4276
4284 writeq(val64, &vp_reg->rxmac_vcfg1); 4277 writeq(val64, &vp_reg->rxmac_vcfg1);
4285 } 4278 }
4286 return status; 4279 return VXGE_HW_OK;
4287} 4280}
4288 4281
4289/* 4282/*
@@ -4295,7 +4288,6 @@ static enum vxge_hw_status
4295__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) 4288__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4296{ 4289{
4297 u64 val64; 4290 u64 val64;
4298 enum vxge_hw_status status = VXGE_HW_OK;
4299 struct __vxge_hw_virtualpath *vpath; 4291 struct __vxge_hw_virtualpath *vpath;
4300 struct vxge_hw_vpath_reg __iomem *vp_reg; 4292 struct vxge_hw_vpath_reg __iomem *vp_reg;
4301 struct vxge_hw_vp_config *config; 4293 struct vxge_hw_vp_config *config;
@@ -4545,7 +4537,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4545 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); 4537 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4546 writeq(val64, &vp_reg->tim_wrkld_clc); 4538 writeq(val64, &vp_reg->tim_wrkld_clc);
4547 4539
4548 return status; 4540 return VXGE_HW_OK;
4549} 4541}
4550 4542
4551/* 4543/*
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index f8f073880f84..b07d552a27d4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -62,8 +62,8 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
62 ethtool_cmd_speed_set(info, SPEED_10000); 62 ethtool_cmd_speed_set(info, SPEED_10000);
63 info->duplex = DUPLEX_FULL; 63 info->duplex = DUPLEX_FULL;
64 } else { 64 } else {
65 ethtool_cmd_speed_set(info, -1); 65 ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
66 info->duplex = -1; 66 info->duplex = DUPLEX_UNKNOWN;
67 } 67 }
68 68
69 info->autoneg = AUTONEG_DISABLE; 69 info->autoneg = AUTONEG_DISABLE;
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1128 1128
1129void vxge_initialize_ethtool_ops(struct net_device *ndev) 1129void vxge_initialize_ethtool_ops(struct net_device *ndev)
1130{ 1130{
1131 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); 1131 ndev->ethtool_ops = &vxge_ethtool_ops;
1132} 1132}
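
[The get_settings handlers above (s2io, vxge, and later forcedeth and pch_gbe) stop reporting link-down speed/duplex as a bare -1 and use the named constants. In this era's ethtool.h, SPEED_UNKNOWN is -1 and DUPLEX_UNKNOWN is 0xff, so the stored bits should be unchanged (duplex is a u8, where -1 already truncated to 0xff); the constants only make the intent explicit. Sketch:]

    if (netif_carrier_ok(dev)) {
            ethtool_cmd_speed_set(info, SPEED_10000);
            info->duplex = DUPLEX_FULL;
    } else {
            ethtool_cmd_speed_set(info, SPEED_UNKNOWN);     /* was -1 */
            info->duplex = DUPLEX_UNKNOWN;                  /* was -1, i.e. 0xff in a u8 */
    }
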
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d107bcbb8543..7a0deadd53bf 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2122static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) 2122static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2123{ 2123{
2124 fifo->interrupt_count++; 2124 fifo->interrupt_count++;
2125 if (jiffies > fifo->jiffies + HZ / 100) { 2125 if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
2126 struct __vxge_hw_fifo *hw_fifo = fifo->handle; 2126 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2127 2127
2128 fifo->jiffies = jiffies; 2128 fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2150static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) 2150static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2151{ 2151{
2152 ring->interrupt_count++; 2152 ring->interrupt_count++;
2153 if (jiffies > ring->jiffies + HZ / 100) { 2153 if (time_before(ring->jiffies + HZ / 100, jiffies)) {
2154 struct __vxge_hw_ring *hw_ring = ring->handle; 2154 struct __vxge_hw_ring *hw_ring = ring->handle;
2155 2155
2156 ring->jiffies = jiffies; 2156 ring->jiffies = jiffies;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index fddb464aeab3..9afc536c5734 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -406,7 +406,7 @@ union ring_type {
406 406
407#define NV_RX_DESCRIPTORVALID (1<<16) 407#define NV_RX_DESCRIPTORVALID (1<<16)
408#define NV_RX_MISSEDFRAME (1<<17) 408#define NV_RX_MISSEDFRAME (1<<17)
409#define NV_RX_SUBSTRACT1 (1<<18) 409#define NV_RX_SUBTRACT1 (1<<18)
410#define NV_RX_ERROR1 (1<<23) 410#define NV_RX_ERROR1 (1<<23)
411#define NV_RX_ERROR2 (1<<24) 411#define NV_RX_ERROR2 (1<<24)
412#define NV_RX_ERROR3 (1<<25) 412#define NV_RX_ERROR3 (1<<25)
@@ -423,7 +423,7 @@ union ring_type {
423#define NV_RX2_CHECKSUM_IP_TCP (0x14000000) 423#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
424#define NV_RX2_CHECKSUM_IP_UDP (0x18000000) 424#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
425#define NV_RX2_DESCRIPTORVALID (1<<29) 425#define NV_RX2_DESCRIPTORVALID (1<<29)
426#define NV_RX2_SUBSTRACT1 (1<<25) 426#define NV_RX2_SUBTRACT1 (1<<25)
427#define NV_RX2_ERROR1 (1<<18) 427#define NV_RX2_ERROR1 (1<<18)
428#define NV_RX2_ERROR2 (1<<19) 428#define NV_RX2_ERROR2 (1<<19)
429#define NV_RX2_ERROR3 (1<<20) 429#define NV_RX2_ERROR3 (1<<20)
@@ -2832,7 +2832,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2832 } 2832 }
2833 /* framing errors are soft errors */ 2833 /* framing errors are soft errors */
2834 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2834 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2835 if (flags & NV_RX_SUBSTRACT1) 2835 if (flags & NV_RX_SUBTRACT1)
2836 len--; 2836 len--;
2837 } 2837 }
2838 /* the rest are hard errors */ 2838 /* the rest are hard errors */
@@ -2863,7 +2863,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2863 } 2863 }
2864 /* framing errors are soft errors */ 2864 /* framing errors are soft errors */
2865 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2865 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2866 if (flags & NV_RX2_SUBSTRACT1) 2866 if (flags & NV_RX2_SUBTRACT1)
2867 len--; 2867 len--;
2868 } 2868 }
2869 /* the rest are hard errors */ 2869 /* the rest are hard errors */
@@ -2937,7 +2937,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2937 } 2937 }
2938 /* framing errors are soft errors */ 2938 /* framing errors are soft errors */
2939 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2939 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2940 if (flags & NV_RX2_SUBSTRACT1) 2940 if (flags & NV_RX2_SUBTRACT1)
2941 len--; 2941 len--;
2942 } 2942 }
2943 /* the rest are hard errors */ 2943 /* the rest are hard errors */
@@ -4285,8 +4285,8 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4285 if (np->duplex) 4285 if (np->duplex)
4286 ecmd->duplex = DUPLEX_FULL; 4286 ecmd->duplex = DUPLEX_FULL;
4287 } else { 4287 } else {
4288 speed = -1; 4288 speed = SPEED_UNKNOWN;
4289 ecmd->duplex = -1; 4289 ecmd->duplex = DUPLEX_UNKNOWN;
4290 } 4290 }
4291 ethtool_cmd_speed_set(ecmd, speed); 4291 ethtool_cmd_speed_set(ecmd, speed);
4292 ecmd->autoneg = np->autoneg; 4292 ecmd->autoneg = np->autoneg;
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5766 dev->netdev_ops = &nv_netdev_ops_optimized; 5766 dev->netdev_ops = &nv_netdev_ops_optimized;
5767 5767
5768 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5768 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5769 SET_ETHTOOL_OPS(dev, &ops); 5769 dev->ethtool_ops = &ops;
5770 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5770 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5771 5771
5772 pci_set_drvdata(pci_dev, dev); 5772 pci_set_drvdata(pci_dev, dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 422d9b51ac24..8706c0dbd0c3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1361,7 +1361,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1361 __lpc_eth_clock_enable(pldat, true); 1361 __lpc_eth_clock_enable(pldat, true);
1362 1362
1363 /* Map IO space */ 1363 /* Map IO space */
1364 pldat->net_base = ioremap(res->start, res->end - res->start + 1); 1364 pldat->net_base = ioremap(res->start, resource_size(res));
1365 if (!pldat->net_base) { 1365 if (!pldat->net_base) {
1366 dev_err(&pdev->dev, "failed to map registers\n"); 1366 dev_err(&pdev->dev, "failed to map registers\n");
1367 ret = -ENOMEM; 1367 ret = -ENOMEM;
@@ -1417,10 +1417,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1417 } 1417 }
1418 pldat->dma_buff_base_p = dma_handle; 1418 pldat->dma_buff_base_p = dma_handle;
1419 1419
1420 netdev_dbg(ndev, "IO address start :0x%08x\n", 1420 netdev_dbg(ndev, "IO address space :%pR\n", res);
1421 res->start); 1421 netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
1422 netdev_dbg(ndev, "IO address size :%d\n",
1423 res->end - res->start + 1);
1424 netdev_dbg(ndev, "IO address (mapped) :0x%p\n", 1422 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
1425 pldat->net_base); 1423 pldat->net_base);
1426 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); 1424 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
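
[The lpc_eth hunks above replace the open-coded length calculation with resource_size(). A struct resource describes an inclusive [start, end] range, so the length is end - start + 1; the helper encodes that off-by-one once. Sketch, with example_res_len() as a hypothetical stand-in for the real helper:]

    #include <linux/ioport.h>

    /* Equivalent to this era's resource_size() helper: */
    static inline resource_size_t example_res_len(const struct resource *res)
    {
            return res->end - res->start + 1;       /* inclusive range */
    }

    /* usage, as in the patch: */
    pldat->net_base = ioremap(res->start, resource_size(res));
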
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index a588ffde9700..44c8be1c6805 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
4 4
5config PCH_GBE 5config PCH_GBE
6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" 6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
7 depends on PCI && (X86 || COMPILE_TEST) 7 depends on PCI && (X86_32 || COMPILE_TEST)
8 select MII 8 select MII
9 select PTP_1588_CLOCK_PCH 9 select PTP_1588_CLOCK_PCH
10 ---help--- 10 ---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 826f0ccdc23c..4fe8ea96bd25 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -91,7 +91,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
91 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); 91 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
92 92
93 if (!netif_carrier_ok(adapter->netdev)) 93 if (!netif_carrier_ok(adapter->netdev))
94 ethtool_cmd_speed_set(ecmd, -1); 94 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
95 return ret; 95 return ret;
96} 96}
97 97
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
508 508
509void pch_gbe_set_ethtool_ops(struct net_device *netdev) 509void pch_gbe_set_ethtool_ops(struct net_device *netdev)
510{ 510{
511 SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops); 511 netdev->ethtool_ops = &pch_gbe_ethtool_ops;
512} 512}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index b6bdeb3c1971..9a997e4c3e08 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
724 724
725 /* The Hamachi-specific entries in the device structure. */ 725 /* The Hamachi-specific entries in the device structure. */
726 dev->netdev_ops = &hamachi_netdev_ops; 726 dev->netdev_ops = &hamachi_netdev_ops;
727 if (chip_tbl[hmp->chip_id].flags & CanHaveMII) 727 dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
728 SET_ETHTOOL_OPS(dev, &ethtool_ops); 728 &ethtool_ops : &ethtool_ops_no_mii;
729 else
730 SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
731 dev->watchdog_timeo = TX_TIMEOUT; 729 dev->watchdog_timeo = TX_TIMEOUT;
732 if (mtu) 730 if (mtu)
733 dev->mtu = mtu; 731 dev->mtu = mtu;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 9a6cb482dcd0..69a8dc095072 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
472 472
473 /* The Yellowfin-specific entries in the device structure. */ 473 /* The Yellowfin-specific entries in the device structure. */
474 dev->netdev_ops = &netdev_ops; 474 dev->netdev_ops = &netdev_ops;
475 SET_ETHTOOL_OPS(dev, &ethtool_ops); 475 dev->ethtool_ops = &ethtool_ops;
476 dev->watchdog_timeo = TX_TIMEOUT; 476 dev->watchdog_timeo = TX_TIMEOUT;
477 477
478 if (mtu) 478 if (mtu)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c14bd3116e45..d49cba129081 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
66 Say Y here if you want to enable hardware offload support for 66 Say Y here if you want to enable hardware offload support for
67 Virtual eXtensible Local Area Network (VXLAN) in the driver. 67 Virtual eXtensible Local Area Network (VXLAN) in the driver.
68 68
69config QLCNIC_HWMON
70 bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
71 depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
72 default y
73 ---help---
74 This configuration parameter can be used to read the
75 board temperature in Converged Ethernet devices
76 supported by qlcnic.
77
78 This data is available via the hwmon sysfs interface.
79
69config QLGE 80config QLGE
70 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 81 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
71 depends on PCI 82 depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f09c35d669b3..5bf05818a12c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1373 1373
1374 netxen_nic_change_mtu(netdev, netdev->mtu); 1374 netxen_nic_change_mtu(netdev, netdev->mtu);
1375 1375
1376 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 1376 netdev->ethtool_ops = &netxen_nic_ethtool_ops;
1377 1377
1378 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1378 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1379 NETIF_F_RXCSUM; 1379 NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2eabd44f8914..b5d6bc1a8b00 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
3838 3838
3839 /* Set driver entry points */ 3839 /* Set driver entry points */
3840 ndev->netdev_ops = &ql3xxx_netdev_ops; 3840 ndev->netdev_ops = &ql3xxx_netdev_ops;
3841 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3841 ndev->ethtool_ops = &ql3xxx_ethtool_ops;
3842 ndev->watchdog_timeo = 5 * HZ; 3842 ndev->watchdog_timeo = 5 * HZ;
3843 3843
3844 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3844 netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f785d01c7d12..be618b9e874f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
39 39
40#define _QLCNIC_LINUX_MAJOR 5 40#define _QLCNIC_LINUX_MAJOR 5
41#define _QLCNIC_LINUX_MINOR 3 41#define _QLCNIC_LINUX_MINOR 3
42#define _QLCNIC_LINUX_SUBVERSION 57 42#define _QLCNIC_LINUX_SUBVERSION 60
43#define QLCNIC_LINUX_VERSIONID "5.3.57" 43#define QLCNIC_LINUX_VERSIONID "5.3.60"
44#define QLCNIC_DRV_IDC_VER 0x01 44#define QLCNIC_DRV_IDC_VER 0x01
45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -441,6 +441,8 @@ struct qlcnic_82xx_dump_template_hdr {
441 u32 rsvd1[0]; 441 u32 rsvd1[0];
442}; 442};
443 443
444#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
445
444struct qlcnic_fw_dump { 446struct qlcnic_fw_dump {
445 u8 clr; /* flag to indicate if dump is cleared */ 447 u8 clr; /* flag to indicate if dump is cleared */
446 bool enable; /* enable/disable dump */ 448 bool enable; /* enable/disable dump */
@@ -537,6 +539,7 @@ struct qlcnic_hardware_context {
537 u8 phys_port_id[ETH_ALEN]; 539 u8 phys_port_id[ETH_ALEN];
538 u8 lb_mode; 540 u8 lb_mode;
539 u16 vxlan_port; 541 u16 vxlan_port;
542 struct device *hwmon_dev;
540}; 543};
541 544
542struct qlcnic_adapter_stats { 545struct qlcnic_adapter_stats {
@@ -1018,6 +1021,8 @@ struct qlcnic_ipaddr {
1018#define QLCNIC_DEL_VXLAN_PORT 0x200000 1021#define QLCNIC_DEL_VXLAN_PORT 0x200000
1019#endif 1022#endif
1020 1023
1024#define QLCNIC_VLAN_FILTERING 0x800000
1025
1021#define QLCNIC_IS_MSI_FAMILY(adapter) \ 1026#define QLCNIC_IS_MSI_FAMILY(adapter) \
1022 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 1027 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
1023#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 1028#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1316,6 +1321,7 @@ struct qlcnic_eswitch {
1316#define QL_STATUS_INVALID_PARAM -1 1321#define QL_STATUS_INVALID_PARAM -1
1317 1322
1318#define MAX_BW 100 /* % of link speed */ 1323#define MAX_BW 100 /* % of link speed */
1324#define MIN_BW 1 /* % of link speed */
1319#define MAX_VLAN_ID 4095 1325#define MAX_VLAN_ID 4095
1320#define MIN_VLAN_ID 2 1326#define MIN_VLAN_ID 2
1321#define DEFAULT_MAC_LEARN 1 1327#define DEFAULT_MAC_LEARN 1
@@ -1692,7 +1698,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
1692int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); 1698int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
1693void qlcnic_set_netdev_features(struct qlcnic_adapter *, 1699void qlcnic_set_netdev_features(struct qlcnic_adapter *,
1694 struct qlcnic_esw_func_cfg *); 1700 struct qlcnic_esw_func_cfg *);
1695void qlcnic_sriov_vf_schedule_multi(struct net_device *); 1701void qlcnic_sriov_vf_set_multi(struct net_device *);
1696int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8); 1702int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
1697int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *, 1703int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
1698 u16 *); 1704 u16 *);
@@ -2338,6 +2344,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
2338 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; 2344 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
2339} 2345}
2340 2346
2347static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
2348{
2349 bool status;
2350
2351 status = (qlcnic_sriov_pf_check(adapter) ||
2352 qlcnic_sriov_vf_check(adapter)) ? true : false;
2353
2354 return status;
2355}
2356
2341static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter) 2357static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
2342{ 2358{
2343 if (qlcnic_84xx_check(adapter)) 2359 if (qlcnic_84xx_check(adapter))
@@ -2345,4 +2361,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
2345 else 2361 else
2346 return QLC_DEFAULT_VNIC_COUNT; 2362 return QLC_DEFAULT_VNIC_COUNT;
2347} 2363}
2364
2365#ifdef CONFIG_QLCNIC_HWMON
2366void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
2367void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
2368#else
2369static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
2370{
2371 return;
2372}
2373static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
2374{
2375 return;
2376}
2377#endif
2348#endif /* __QLCNIC_H_ */ 2378#endif /* __QLCNIC_H_ */
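
[The qlcnic.h hunk above gates the new hwmon hooks behind CONFIG_QLCNIC_HWMON with empty static-inline fallbacks, the standard kernel pattern for keeping call sites free of #ifdefs. Generic shape; CONFIG_EXAMPLE_FEATURE and the names are hypothetical:]

    #ifdef CONFIG_EXAMPLE_FEATURE
    void example_feature_register(struct example_adapter *ad);
    void example_feature_unregister(struct example_adapter *ad);
    #else
    static inline void example_feature_register(struct example_adapter *ad) { }
    static inline void example_feature_unregister(struct example_adapter *ad) { }
    #endif
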
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b7cffb46a75d..a4a4ec0b68f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
33#define RSS_HASHTYPE_IP_TCP 0x3 33#define RSS_HASHTYPE_IP_TCP 0x3
34#define QLC_83XX_FW_MBX_CMD 0 34#define QLC_83XX_FW_MBX_CMD 0
35#define QLC_SKIP_INACTIVE_PCI_REGS 7 35#define QLC_SKIP_INACTIVE_PCI_REGS 7
36#define QLC_MAX_LEGACY_FUNC_SUPP 8
36 37
37static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { 38static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
38 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, 39 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
357 if (!ahw->intr_tbl) 358 if (!ahw->intr_tbl)
358 return -ENOMEM; 359 return -ENOMEM;
359 360
360 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 361 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
362 if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
363 dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
364 ahw->pci_func);
365 return -EOPNOTSUPP;
366 }
367
361 qlcnic_83xx_enable_legacy(adapter); 368 qlcnic_83xx_enable_legacy(adapter);
369 }
362 370
363 for (i = 0; i < num_msix; i++) { 371 for (i = 0; i < num_msix; i++) {
364 if (adapter->flags & QLCNIC_MSIX_ENABLED) 372 if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
879 return 0; 887 return 0;
880 } 888 }
881 } 889 }
890
891 dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
892 __func__, type);
882 return -EINVAL; 893 return -EINVAL;
883} 894}
884 895
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
3026 QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); 3037 QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
3027} 3038}
3028 3039
3029int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, 3040int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
3030 u32 *data, u32 count) 3041 u32 *data, u32 count)
3031{ 3042{
3032 int i, j, ret = 0; 3043 int i, j, ret = 0;
3033 u32 temp; 3044 u32 temp;
3034 int err = 0;
3035 3045
3036 /* Check alignment */ 3046 /* Check alignment */
3037 if (addr & 0xF) 3047 if (addr & 0xF)
3038 return -EIO; 3048 return -EIO;
3039 3049
3040 mutex_lock(&adapter->ahw->mem_lock); 3050 mutex_lock(&adapter->ahw->mem_lock);
3041 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0); 3051 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
3042 3052
3043 for (i = 0; i < count; i++, addr += 16) { 3053 for (i = 0; i < count; i++, addr += 16) {
3044 if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET, 3054 if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
3049 return -EIO; 3059 return -EIO;
3050 } 3060 }
3051 3061
3052 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr); 3062 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
3053 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO, 3063 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
3054 *data++); 3064 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
3055 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI, 3065 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
3056 *data++); 3066 qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
3057 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO, 3067 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
3058 *data++); 3068 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
3059 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
3060 *data++);
3061 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
3062 QLCNIC_TA_WRITE_ENABLE);
3063 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
3064 QLCNIC_TA_WRITE_START);
3065 3069
3066 for (j = 0; j < MAX_CTL_CHECK; j++) { 3070 for (j = 0; j < MAX_CTL_CHECK; j++) {
3067 temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err); 3071 temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
3068 if (err == -EIO) {
3069 mutex_unlock(&adapter->ahw->mem_lock);
3070 return err;
3071 }
3072 3072
3073 if ((temp & TA_CTL_BUSY) == 0) 3073 if ((temp & TA_CTL_BUSY) == 0)
3074 break; 3074 break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 88d809c35633..2bf101a47d02 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -418,7 +418,6 @@ enum qlcnic_83xx_states {
418#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) 418#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
419#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) 419#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
420#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40) 420#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
421#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
422#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400) 421#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
423#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000) 422#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
424#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) 423#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
@@ -560,7 +559,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
560void qlcnic_83xx_napi_enable(struct qlcnic_adapter *); 559void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
561void qlcnic_83xx_napi_disable(struct qlcnic_adapter *); 560void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
562int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32); 561int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
563void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32); 562int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
564int qlcnic_ind_rd(struct qlcnic_adapter *, u32); 563int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
565int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *); 564int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
566int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *, 565int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +616,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
617int qlcnic_83xx_lock_driver(struct qlcnic_adapter *); 616int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
618void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *); 617void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
619int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *); 618int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
620int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
621int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *); 619int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
622int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int); 620int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
623int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *); 621int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +657,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
659u32 qlcnic_83xx_get_cap_size(void *, int); 657u32 qlcnic_83xx_get_cap_size(void *, int);
660void qlcnic_83xx_set_sys_info(void *, int, u32); 658void qlcnic_83xx_set_sys_info(void *, int, u32);
661void qlcnic_83xx_store_cap_mask(void *, u32); 659void qlcnic_83xx_store_cap_mask(void *, u32);
660int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
662#endif 661#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ba20c721ee97..f33559b72528 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
1363 return ret; 1363 return ret;
1364 } 1364 }
1365 /* 16 byte write to MS memory */ 1365 /* 16 byte write to MS memory */
1366 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, 1366 ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
1367 size / 16); 1367 size / 16);
1368 if (ret) { 1368 if (ret) {
1369 vfree(p_cache); 1369 vfree(p_cache);
1370 return ret; 1370 return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1389 p_cache = (u32 *)fw->data; 1389 p_cache = (u32 *)fw->data;
1390 addr = (u64)dest; 1390 addr = (u64)dest;
1391 1391
1392 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1392 ret = qlcnic_ms_mem_write128(adapter, addr,
1393 p_cache, size / 16); 1393 p_cache, size / 16);
1394 if (ret) { 1394 if (ret) {
1395 dev_err(&adapter->pdev->dev, "MS memory write failed\n"); 1395 dev_err(&adapter->pdev->dev, "MS memory write failed\n");
1396 release_firmware(fw); 1396 release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1405 data[i] = fw->data[size + i]; 1405 data[i] = fw->data[size + i];
1406 for (; i < 16; i++) 1406 for (; i < 16; i++)
1407 data[i] = 0; 1407 data[i] = 0;
1408 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1408 ret = qlcnic_ms_mem_write128(adapter, addr,
1409 (u32 *)data, 1); 1409 (u32 *)data, 1);
1410 if (ret) { 1410 if (ret) {
1411 dev_err(&adapter->pdev->dev, 1411 dev_err(&adapter->pdev->dev,
1412 "MS memory write failed\n"); 1412 "MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2181 max_sds_rings = QLCNIC_MAX_SDS_RINGS; 2181 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2182 max_tx_rings = QLCNIC_MAX_TX_RINGS; 2182 max_tx_rings = QLCNIC_MAX_TX_RINGS;
2183 } else { 2183 } else {
2184 dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
2185 __func__, ret);
2184 return -EIO; 2186 return -EIO;
2185 } 2187 }
2186 2188
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index c1e11f5715b0..304e247bdf33 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
1027 u32 arg1; 1027 u32 arg1;
1028 1028
1029 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || 1029 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
1030 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) 1030 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
1031 dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
1032 __func__);
1031 return err; 1033 return err;
1034 }
1032 1035
1033 arg1 = id | (enable_mirroring ? BIT_4 : 0); 1036 arg1 = id | (enable_mirroring ? BIT_4 : 0);
1034 arg1 |= pci_func << 8; 1037 arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1318 u32 arg1, arg2 = 0; 1321 u32 arg1, arg2 = 0;
1319 u8 pci_func; 1322 u8 pci_func;
1320 1323
1321 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1324 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
1325 dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
1326 __func__);
1322 return err; 1327 return err;
1328 }
1329
1323 pci_func = esw_cfg->pci_func; 1330 pci_func = esw_cfg->pci_func;
1324 index = qlcnic_is_valid_nic_func(adapter, pci_func); 1331 index = qlcnic_is_valid_nic_func(adapter, pci_func);
1325 if (index < 0) 1332 if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1363 arg1 &= ~(0x0ffff << 16); 1370 arg1 &= ~(0x0ffff << 16);
1364 break; 1371 break;
1365 default: 1372 default:
1373 dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
1374 __func__, esw_cfg->op_mode);
1366 return err; 1375 return err;
1367 } 1376 }
1368 1377
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5bacf5210aed..1b7f3dbae289 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -726,6 +726,11 @@ static int qlcnic_set_channels(struct net_device *dev,
726 struct qlcnic_adapter *adapter = netdev_priv(dev); 726 struct qlcnic_adapter *adapter = netdev_priv(dev);
727 int err; 727 int err;
728 728
729 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
730 netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
731 return -EINVAL;
732 }
733
729 if (channel->other_count || channel->combined_count) 734 if (channel->other_count || channel->combined_count)
730 return -EINVAL; 735 return -EINVAL;
731 736
@@ -734,7 +739,7 @@ static int qlcnic_set_channels(struct net_device *dev,
734 if (err) 739 if (err)
735 return err; 740 return err;
736 741
737 if (channel->rx_count) { 742 if (adapter->drv_sds_rings != channel->rx_count) {
738 err = qlcnic_validate_rings(adapter, channel->rx_count, 743 err = qlcnic_validate_rings(adapter, channel->rx_count,
739 QLCNIC_RX_QUEUE); 744 QLCNIC_RX_QUEUE);
740 if (err) { 745 if (err) {
@@ -745,7 +750,7 @@ static int qlcnic_set_channels(struct net_device *dev,
745 adapter->drv_rss_rings = channel->rx_count; 750 adapter->drv_rss_rings = channel->rx_count;
746 } 751 }
747 752
748 if (channel->tx_count) { 753 if (adapter->drv_tx_rings != channel->tx_count) {
749 err = qlcnic_validate_rings(adapter, channel->tx_count, 754 err = qlcnic_validate_rings(adapter, channel->tx_count,
750 QLCNIC_TX_QUEUE); 755 QLCNIC_TX_QUEUE);
751 if (err) { 756 if (err) {
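
Two behavioural changes in qlcnic_set_channels(): the RSS/TSS-needs-MSI-X check moves up front (replacing the per-call checks removed from qlcnic_validate_rings() further down), and a ring count is validated only when it differs from the current value, so re-requesting the active configuration becomes a no-op. A condensed sketch of the changed flow, with a hypothetical helper:

/* Sketch (hypothetical helper): validate only when the count changes. */
static int qlcnic_ring_count_changed(struct qlcnic_adapter *adapter,
				     u32 current_rings, u32 requested,
				     int queue_type)
{
	if (current_rings == requested)
		return 0;			/* no-op: keep current rings */

	return qlcnic_validate_rings(adapter, requested, queue_type);
}
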
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 9f3adf4e70b5..851cb4a80d50 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
373 return data; 373 return data;
374} 374}
375 375
376void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data) 376int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
377{ 377{
378 int ret = 0;
379
378 if (qlcnic_82xx_check(adapter)) 380 if (qlcnic_82xx_check(adapter))
379 qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data); 381 qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
380 else 382 else
381 qlcnic_83xx_wrt_reg_indirect(adapter, addr, data); 383 ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
384
385 return ret;
382} 386}
383 387
384static int 388static int
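
qlcnic_ind_wr() now returns the status of the 83xx indirect register write (the 82xx window write cannot fail, so that leg reports 0). The point of the new return value shows up in the minidump hunks below, where every PEX-DMA setup write is checked:

/* Sketch: caller-side use of the new return value (see the minidump hunks). */
ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
if (ret)
	return ret;	/* abort DMA setup on a failed indirect write */
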
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
567void qlcnic_set_multi(struct net_device *netdev) 571void qlcnic_set_multi(struct net_device *netdev)
568{ 572{
569 struct qlcnic_adapter *adapter = netdev_priv(netdev); 573 struct qlcnic_adapter *adapter = netdev_priv(netdev);
570 struct qlcnic_mac_vlan_list *cur;
571 struct netdev_hw_addr *ha;
572 size_t temp;
573 574
574 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 575 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
575 return; 576 return;
576 if (qlcnic_sriov_vf_check(adapter)) { 577
577 if (!netdev_mc_empty(netdev)) { 578 if (qlcnic_sriov_vf_check(adapter))
578 netdev_for_each_mc_addr(ha, netdev) { 579 qlcnic_sriov_vf_set_multi(netdev);
579 temp = sizeof(struct qlcnic_mac_vlan_list); 580 else
580 cur = kzalloc(temp, GFP_ATOMIC); 581 __qlcnic_set_multi(netdev, 0);
581 if (cur == NULL)
582 break;
583 memcpy(cur->mac_addr,
584 ha->addr, ETH_ALEN);
585 list_add_tail(&cur->list, &adapter->vf_mc_list);
586 }
587 }
588 qlcnic_sriov_vf_schedule_multi(adapter->netdev);
589 return;
590 }
591 __qlcnic_set_multi(netdev, 0);
592} 582}
593 583
594int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) 584int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
630 struct hlist_node *n; 620 struct hlist_node *n;
631 struct hlist_head *head; 621 struct hlist_head *head;
632 int i; 622 int i;
633 unsigned long time; 623 unsigned long expires;
634 u8 cmd; 624 u8 cmd;
635 625
636 for (i = 0; i < adapter->fhash.fbucket_size; i++) { 626 for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
638 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { 628 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
639 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : 629 cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
640 QLCNIC_MAC_DEL; 630 QLCNIC_MAC_DEL;
641 time = tmp_fil->ftime; 631 expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
642 if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { 632 if (time_before(expires, jiffies)) {
643 qlcnic_sre_macaddr_change(adapter, 633 qlcnic_sre_macaddr_change(adapter,
644 tmp_fil->faddr, 634 tmp_fil->faddr,
645 tmp_fil->vlan_id, 635 tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
657 647
658 hlist_for_each_entry_safe(tmp_fil, n, head, fnode) 648 hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
659 { 649 {
660 time = tmp_fil->ftime; 650 expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
661 if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { 651 if (time_before(expires, jiffies)) {
662 spin_lock_bh(&adapter->rx_mac_learn_lock); 652 spin_lock_bh(&adapter->rx_mac_learn_lock);
663 adapter->rx_fhash.fnum--; 653 adapter->rx_fhash.fnum--;
664 hlist_del(&tmp_fil->fnode); 654 hlist_del(&tmp_fil->fnode);
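
The ftime hunks fix a jiffies wrap-around bug: the open-coded `jiffies > (QLCNIC_FILTER_AGE * HZ + time)` compares raw counter values and gives the wrong answer once jiffies wraps, whereas time_before() compares in modular arithmetic. A self-contained sketch of the fixed test (QLCNIC_FILTER_AGE is the driver's own macro):

#include <linux/jiffies.h>

/* Sketch (hypothetical helper): wrap-safe expiry check, as in the hunks above. */
static bool qlcnic_filter_expired(unsigned long ftime)
{
	unsigned long expires = ftime + QLCNIC_FILTER_AGE * HZ;

	/*
	 * time_before(a, b) is (long)(a - b) < 0, so the result stays
	 * correct across a jiffies wrap; the old raw comparison would
	 * have kept stale filters alive after the wrap.
	 */
	return time_before(expires, jiffies);
}
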
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 173b3d12991f..e45bf09af0c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
305{ 305{
306 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); 306 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
307 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 307 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
308 struct net_device *netdev = adapter->netdev;
309 u16 protocol = ntohs(skb->protocol); 308 u16 protocol = ntohs(skb->protocol);
310 struct qlcnic_filter *fil, *tmp_fil; 309 struct qlcnic_filter *fil, *tmp_fil;
311 struct hlist_head *head; 310 struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
314 u16 vlan_id = 0; 313 u16 vlan_id = 0;
315 u8 hindex, hval; 314 u8 hindex, hval;
316 315
317 if (!qlcnic_sriov_pf_check(adapter)) { 316 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
318 if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) 317 return;
319 return; 318
320 } else { 319 if (adapter->flags & QLCNIC_VLAN_FILTERING) {
321 if (protocol == ETH_P_8021Q) { 320 if (protocol == ETH_P_8021Q) {
322 vh = (struct vlan_ethhdr *)skb->data; 321 vh = (struct vlan_ethhdr *)skb->data;
323 vlan_id = ntohs(vh->h_vlan_TCI); 322 vlan_id = ntohs(vh->h_vlan_TCI);
324 } else if (vlan_tx_tag_present(skb)) { 323 } else if (vlan_tx_tag_present(skb)) {
325 vlan_id = vlan_tx_tag_get(skb); 324 vlan_id = vlan_tx_tag_get(skb);
326 } 325 }
327
328 if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
329 !vlan_id)
330 return;
331 }
332
333 if (adapter->fhash.fnum >= adapter->fhash.fmax) {
334 adapter->stats.mac_filter_limit_overrun++;
335 netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
336 adapter->fhash.fmax, adapter->fhash.fnum);
337 return;
338 } 326 }
339 327
340 memcpy(&src_addr, phdr->h_source, ETH_ALEN); 328 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
353 } 341 }
354 } 342 }
355 343
344 if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
345 adapter->stats.mac_filter_limit_overrun++;
346 return;
347 }
348
356 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); 349 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
357 if (!fil) 350 if (!fil)
358 return; 351 return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1216 if (!skb) 1209 if (!skb)
1217 return buffer; 1210 return buffer;
1218 1211
1219 if (adapter->drv_mac_learn && 1212 if (adapter->rx_mac_learn) {
1220 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1221 t_vid = 0; 1213 t_vid = 0;
1222 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); 1214 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1223 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); 1215 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1293 if (!skb) 1285 if (!skb)
1294 return buffer; 1286 return buffer;
1295 1287
1296 if (adapter->drv_mac_learn && 1288 if (adapter->rx_mac_learn) {
1297 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1298 t_vid = 0; 1289 t_vid = 0;
1299 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); 1290 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1300 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); 1291 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
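
qlcnic_send_filter() is reordered so the hash lookup runs before the fmax limit check: traffic from an already-learned MAC/VLAN pair only refreshes its entry and can no longer be bounced by a full table, and the per-packet netdev_info() on overrun is gone from the hot path (the stats counter remains). Sketched control flow, hash details elided and the lookup helper hypothetical:

/* Sketch: refresh existing entries first; the limit gates new learns only. */
tmp_fil = qlcnic_find_filter(head, &src_addr, vlan_id);	/* hypothetical */
if (tmp_fil) {
	tmp_fil->ftime = jiffies;			/* refresh, no limit check */
	return;
}

if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
	adapter->stats.mac_filter_limit_overrun++;	/* count, stay quiet */
	return;
}

fil = kzalloc(sizeof(*fil), GFP_ATOMIC);		/* learn the new pair */
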
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7e55e88a81bf..4fc186713b66 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
378 if (!adapter->fdb_mac_learn) 378 if (!adapter->fdb_mac_learn)
379 return ndo_dflt_fdb_del(ndm, tb, netdev, addr); 379 return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
380 380
381 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 381 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
382 qlcnic_sriov_check(adapter)) {
382 if (is_unicast_ether_addr(addr)) { 383 if (is_unicast_ether_addr(addr)) {
383 err = dev_uc_del(netdev, addr); 384 err = dev_uc_del(netdev, addr);
384 if (!err) 385 if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
402 if (!adapter->fdb_mac_learn) 403 if (!adapter->fdb_mac_learn)
403 return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags); 404 return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
404 405
405 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 406 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
407 !qlcnic_sriov_check(adapter)) {
406 pr_info("%s: FDB e-switch is not enabled\n", __func__); 408 pr_info("%s: FDB e-switch is not enabled\n", __func__);
407 return -EOPNOTSUPP; 409 return -EOPNOTSUPP;
408 } 410 }
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
432 if (!adapter->fdb_mac_learn) 434 if (!adapter->fdb_mac_learn)
433 return ndo_dflt_fdb_dump(skb, ncb, netdev, idx); 435 return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
434 436
435 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) 437 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
438 qlcnic_sriov_check(adapter))
436 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx); 439 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
437 440
438 return idx; 441 return idx;
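
All three FDB callbacks now treat an SR-IOV PF the same as an enabled e-switch. The shared gate, written out once (helper name hypothetical):

/* Sketch: the condition the three FDB hunks above now share. */
static bool qlcnic_fdb_offload_ok(struct qlcnic_adapter *adapter)
{
	return (adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
	       qlcnic_sriov_check(adapter);
}
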
@@ -522,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
522#endif 525#endif
523#ifdef CONFIG_QLCNIC_SRIOV 526#ifdef CONFIG_QLCNIC_SRIOV
524 .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, 527 .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
525 .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate, 528 .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
526 .ndo_get_vf_config = qlcnic_sriov_get_vf_config, 529 .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
527 .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan, 530 .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
528 .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk, 531 .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
690 adapter->msix_entries[vector].entry = vector; 693 adapter->msix_entries[vector].entry = vector;
691 694
692restore: 695restore:
693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 696 err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
694 if (err > 0) { 697 if (err == -ENOSPC) {
695 if (!adapter->drv_tss_rings && !adapter->drv_rss_rings) 698 if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
696 return -ENOSPC; 699 return err;
697 700
698 netdev_info(adapter->netdev, 701 netdev_info(adapter->netdev,
699 "Unable to allocate %d MSI-X vectors, Available vectors %d\n", 702 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
1014 1017
1015 if (pfn >= ahw->max_vnic_func) { 1018 if (pfn >= ahw->max_vnic_func) {
1016 ret = QL_STATUS_INVALID_PARAM; 1019 ret = QL_STATUS_INVALID_PARAM;
1020 dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
1021 __func__, pfn, ahw->max_vnic_func);
1017 goto err_eswitch; 1022 goto err_eswitch;
1018 } 1023 }
1019 1024
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1915 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) 1920 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1916 return; 1921 return;
1917 1922
1918 if (qlcnic_sriov_vf_check(adapter))
1919 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1920 smp_mb(); 1923 smp_mb();
1921 netif_carrier_off(netdev); 1924 netif_carrier_off(netdev);
1922 adapter->ahw->linkup = 0; 1925 adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1928 qlcnic_delete_lb_filters(adapter); 1931 qlcnic_delete_lb_filters(adapter);
1929 1932
1930 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); 1933 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1934 if (qlcnic_sriov_vf_check(adapter))
1935 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1931 1936
1932 qlcnic_napi_disable(adapter); 1937 qlcnic_napi_disable(adapter);
1933 1938
@@ -2052,6 +2057,7 @@ out:
2052 2057
2053static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) 2058static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
2054{ 2059{
2060 struct qlcnic_hardware_context *ahw = adapter->ahw;
2055 int err = 0; 2061 int err = 0;
2056 2062
2057 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), 2063 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
2061 goto err_out; 2067 goto err_out;
2062 } 2068 }
2063 2069
2070 if (qlcnic_83xx_check(adapter)) {
2071 ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
2072 ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
2073 ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
2074 ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
2075 ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
2076 } else {
2077 ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
2078 ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
2079 ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
2080 }
2081
2064 /* clear stats */ 2082 /* clear stats */
2065 memset(&adapter->stats, 0, sizeof(adapter->stats)); 2083 memset(&adapter->stats, 0, sizeof(adapter->stats));
2066err_out: 2084err_out:
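
qlcnic_alloc_adapter_resources() now seeds default interrupt-coalescing parameters at allocation time: 83xx parts default to combined Rx+Tx coalescing, 82xx to Rx-only, so the ethtool coalesce paths always start from a defined state. The 82xx defaults restated as a designated-initializer sketch (type and macro names as in the hunk):

/* Sketch: the 82xx default block above, as a designated initializer. */
static const struct qlcnic_nic_intr_coalesce qlcnic_82xx_coal_dflt = {
	.type       = QLCNIC_INTR_COAL_TYPE_RX,
	.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US,
	.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS,
};
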
@@ -2069,12 +2087,20 @@ err_out:
2069 2087
2070static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) 2088static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
2071{ 2089{
2090 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
2091
2072 kfree(adapter->recv_ctx); 2092 kfree(adapter->recv_ctx);
2073 adapter->recv_ctx = NULL; 2093 adapter->recv_ctx = NULL;
2074 2094
2075 if (adapter->ahw->fw_dump.tmpl_hdr) { 2095 if (fw_dump->tmpl_hdr) {
2076 vfree(adapter->ahw->fw_dump.tmpl_hdr); 2096 vfree(fw_dump->tmpl_hdr);
2077 adapter->ahw->fw_dump.tmpl_hdr = NULL; 2097 fw_dump->tmpl_hdr = NULL;
2098 }
2099
2100 if (fw_dump->dma_buffer) {
2101 dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
2102 fw_dump->dma_buffer, fw_dump->phys_addr);
2103 fw_dump->dma_buffer = NULL;
2078 } 2104 }
2079 2105
2080 kfree(adapter->ahw->reset.buff); 2106 kfree(adapter->ahw->reset.buff);
@@ -2247,10 +2273,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2247 2273
2248 qlcnic_change_mtu(netdev, netdev->mtu); 2274 qlcnic_change_mtu(netdev, netdev->mtu);
2249 2275
2250 if (qlcnic_sriov_vf_check(adapter)) 2276 netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
2251 SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops); 2277 &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
2252 else
2253 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
2254 2278
2255 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2279 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2256 NETIF_F_IPV6_CSUM | NETIF_F_GRO | 2280 NETIF_F_IPV6_CSUM | NETIF_F_GRO |
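
SET_ETHTOOL_OPS() was removed from the kernel in this same cycle; netdev->ethtool_ops is now assigned directly, here folded into one conditional expression selecting the VF ops. The long-hand equivalent of the new line:

/* Sketch: the conditional assignment above, written long-hand. */
if (qlcnic_sriov_vf_check(adapter))
	netdev->ethtool_ops = &qlcnic_sriov_vf_ethtool_ops;
else
	netdev->ethtool_ops = &qlcnic_ethtool_ops;
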
@@ -2417,9 +2441,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2417 int err, pci_using_dac = -1; 2441 int err, pci_using_dac = -1;
2418 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ 2442 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
2419 2443
2420 if (pdev->is_virtfn)
2421 return -ENODEV;
2422
2423 err = pci_enable_device(pdev); 2444 err = pci_enable_device(pdev);
2424 if (err) 2445 if (err)
2425 return err; 2446 return err;
@@ -2552,9 +2573,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2552 case -ENOMEM: 2573 case -ENOMEM:
2553 dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n"); 2574 dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
2554 goto err_out_free_hw; 2575 goto err_out_free_hw;
2576 case -EOPNOTSUPP:
2577 dev_err(&pdev->dev, "Adapter initialization failed\n");
2578 goto err_out_free_hw;
2555 default: 2579 default:
2556 dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n"); 2580 dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
2557 dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
2558 goto err_out_maintenance_mode; 2581 goto err_out_maintenance_mode;
2559 } 2582 }
2560 } 2583 }
@@ -2628,7 +2651,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2628 qlcnic_alloc_lb_filters_mem(adapter); 2651 qlcnic_alloc_lb_filters_mem(adapter);
2629 2652
2630 qlcnic_add_sysfs(adapter); 2653 qlcnic_add_sysfs(adapter);
2631 2654 qlcnic_register_hwmon_dev(adapter);
2632 return 0; 2655 return 0;
2633 2656
2634err_out_disable_mbx_intr: 2657err_out_disable_mbx_intr:
@@ -2665,7 +2688,7 @@ err_out_disable_pdev:
2665err_out_maintenance_mode: 2688err_out_maintenance_mode:
2666 set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state); 2689 set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
2667 netdev->netdev_ops = &qlcnic_netdev_failed_ops; 2690 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
2668 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops); 2691 netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
2669 ahw->port_type = QLCNIC_XGBE; 2692 ahw->port_type = QLCNIC_XGBE;
2670 2693
2671 if (qlcnic_83xx_check(adapter)) 2694 if (qlcnic_83xx_check(adapter))
@@ -2698,9 +2721,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
2698 return; 2721 return;
2699 2722
2700 netdev = adapter->netdev; 2723 netdev = adapter->netdev;
2701 qlcnic_sriov_pf_disable(adapter);
2702 2724
2703 qlcnic_cancel_idc_work(adapter); 2725 qlcnic_cancel_idc_work(adapter);
2726 qlcnic_sriov_pf_disable(adapter);
2704 ahw = adapter->ahw; 2727 ahw = adapter->ahw;
2705 2728
2706 unregister_netdev(netdev); 2729 unregister_netdev(netdev);
@@ -2735,6 +2758,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
2735 2758
2736 qlcnic_remove_sysfs(adapter); 2759 qlcnic_remove_sysfs(adapter);
2737 2760
2761 qlcnic_unregister_hwmon_dev(adapter);
2762
2738 qlcnic_cleanup_pci_map(adapter->ahw); 2763 qlcnic_cleanup_pci_map(adapter->ahw);
2739 2764
2740 qlcnic_release_firmware(adapter); 2765 qlcnic_release_firmware(adapter);
@@ -2828,6 +2853,8 @@ static int qlcnic_close(struct net_device *netdev)
2828 return 0; 2853 return 0;
2829} 2854}
2830 2855
2856#define QLCNIC_VF_LB_BUCKET_SIZE 1
2857
2831void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) 2858void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
2832{ 2859{
2833 void *head; 2860 void *head;
@@ -2843,7 +2870,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
2843 spin_lock_init(&adapter->mac_learn_lock); 2870 spin_lock_init(&adapter->mac_learn_lock);
2844 spin_lock_init(&adapter->rx_mac_learn_lock); 2871 spin_lock_init(&adapter->rx_mac_learn_lock);
2845 2872
2846 if (qlcnic_82xx_check(adapter)) { 2873 if (qlcnic_sriov_vf_check(adapter)) {
2874 filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
2875 adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
2876 } else if (qlcnic_82xx_check(adapter)) {
2847 filter_size = QLCNIC_LB_MAX_FILTERS; 2877 filter_size = QLCNIC_LB_MAX_FILTERS;
2848 adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE; 2878 adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
2849 } else { 2879 } else {
@@ -3973,16 +4003,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
3973 strcpy(buf, "Tx"); 4003 strcpy(buf, "Tx");
3974 } 4004 }
3975 4005
3976 if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
3977 netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
3978 return -EINVAL;
3979 }
3980
3981 if (adapter->flags & QLCNIC_MSI_ENABLED) {
3982 netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
3983 return -EINVAL;
3984 }
3985
3986 if (!is_power_of_2(ring_cnt)) { 4006 if (!is_power_of_2(ring_cnt)) {
3987 netdev_err(netdev, "%s rings value should be a power of 2\n", 4007 netdev_err(netdev, "%s rings value should be a power of 2\n",
3988 buf); 4008 buf);
@@ -4122,7 +4142,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4122 4142
4123 rcu_read_lock(); 4143 rcu_read_lock();
4124 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { 4144 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4125 dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid); 4145 dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
4126 if (!dev) 4146 if (!dev)
4127 continue; 4147 continue;
4128 qlcnic_config_indev_addr(adapter, dev, event); 4148 qlcnic_config_indev_addr(adapter, dev, event);
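
__vlan_find_dev_deep() was renamed to __vlan_find_dev_deep_rcu() upstream to make its locking contract explicit: it walks the VLAN group under RCU and must be called with rcu_read_lock() held, which qlcnic_restore_indev_addr() already does around the loop above. The required calling pattern, condensed:

/* Sketch: an _rcu-suffixed lookup must run inside an RCU read section. */
rcu_read_lock();
dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
if (dev)
	qlcnic_config_indev_addr(adapter, dev, event);
rcu_read_unlock();
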
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 37b979b1266b..e46fc39d425d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
238 238
239 hdr->drv_cap_mask = hdr->cap_mask; 239 hdr->drv_cap_mask = hdr->cap_mask;
240 fw_dump->cap_mask = hdr->cap_mask; 240 fw_dump->cap_mask = hdr->cap_mask;
241
242 fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
241} 243}
242 244
243inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index) 245inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
276 hdr->saved_state[index] = value; 278 hdr->saved_state[index] = value;
277} 279}
278 280
281#define QLCNIC_TEMPLATE_VERSION (0x20001)
282
279void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) 283void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
280{ 284{
281 struct qlcnic_83xx_dump_template_hdr *hdr; 285 struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
288 292
289 hdr->drv_cap_mask = hdr->cap_mask; 293 hdr->drv_cap_mask = hdr->cap_mask;
290 fw_dump->cap_mask = hdr->cap_mask; 294 fw_dump->cap_mask = hdr->cap_mask;
295
296 fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
297 QLCNIC_TEMPLATE_VERSION;
291} 298}
292 299
293inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index) 300inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -653,34 +660,31 @@ out:
653#define QLC_DMA_CMD_BUFF_ADDR_HI 4 660#define QLC_DMA_CMD_BUFF_ADDR_HI 4
654#define QLC_DMA_CMD_STATUS_CTRL 8 661#define QLC_DMA_CMD_STATUS_CTRL 8
655 662
656#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
657
658static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, 663static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
659 struct __mem *mem) 664 struct __mem *mem)
660{ 665{
661 struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
662 struct device *dev = &adapter->pdev->dev; 666 struct device *dev = &adapter->pdev->dev;
663 u32 dma_no, dma_base_addr, temp_addr; 667 u32 dma_no, dma_base_addr, temp_addr;
664 int i, ret, dma_sts; 668 int i, ret, dma_sts;
669 void *tmpl_hdr;
665 670
666 tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr; 671 tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
667 dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; 672 dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
673 QLC_83XX_DMA_ENGINE_INDEX);
668 dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no); 674 dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
669 675
670 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW; 676 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
671 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 677 ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
672 mem->desc_card_addr);
673 if (ret) 678 if (ret)
674 return ret; 679 return ret;
675 680
676 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI; 681 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
677 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0); 682 ret = qlcnic_ind_wr(adapter, temp_addr, 0);
678 if (ret) 683 if (ret)
679 return ret; 684 return ret;
680 685
681 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; 686 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
682 ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 687 ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
683 mem->start_dma_cmd);
684 if (ret) 688 if (ret)
685 return ret; 689 return ret;
686 690
@@ -710,15 +714,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
710 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 714 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
711 u32 temp, dma_base_addr, size = 0, read_size = 0; 715 u32 temp, dma_base_addr, size = 0, read_size = 0;
712 struct qlcnic_pex_dma_descriptor *dma_descr; 716 struct qlcnic_pex_dma_descriptor *dma_descr;
713 struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
714 struct device *dev = &adapter->pdev->dev; 717 struct device *dev = &adapter->pdev->dev;
715 dma_addr_t dma_phys_addr; 718 dma_addr_t dma_phys_addr;
716 void *dma_buffer; 719 void *dma_buffer;
720 void *tmpl_hdr;
717 721
718 tmpl_hdr = fw_dump->tmpl_hdr; 722 tmpl_hdr = fw_dump->tmpl_hdr;
719 723
720 /* Check if DMA engine is available */ 724 /* Check if DMA engine is available */
721 temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; 725 temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
726 QLC_83XX_DMA_ENGINE_INDEX);
722 dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp); 727 dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
723 temp = qlcnic_ind_rd(adapter, 728 temp = qlcnic_ind_rd(adapter,
724 dma_base_addr + QLC_DMA_CMD_STATUS_CTRL); 729 dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +769,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
764 769
765 /* Write DMA descriptor to MS memory*/ 770 /* Write DMA descriptor to MS memory*/
766 temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16; 771 temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
767 *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr, 772 *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
768 (u32 *)dma_descr, temp); 773 (u32 *)dma_descr, temp);
769 if (*ret) { 774 if (*ret) {
770 dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n", 775 dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
771 mem->desc_card_addr); 776 mem->desc_card_addr);
@@ -1141,8 +1146,6 @@ free_mem:
1141 return err; 1146 return err;
1142} 1147}
1143 1148
1144#define QLCNIC_TEMPLATE_VERSION (0x20001)
1145
1146int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) 1149int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1147{ 1150{
1148 struct qlcnic_hardware_context *ahw; 1151 struct qlcnic_hardware_context *ahw;
@@ -1150,6 +1153,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1150 u32 version, csum, *tmp_buf; 1153 u32 version, csum, *tmp_buf;
1151 u8 use_flash_temp = 0; 1154 u8 use_flash_temp = 0;
1152 u32 temp_size = 0; 1155 u32 temp_size = 0;
1156 void *temp_buffer;
1153 int err; 1157 int err;
1154 1158
1155 ahw = adapter->ahw; 1159 ahw = adapter->ahw;
@@ -1199,16 +1203,23 @@ flash_temp:
1199 1203
1200 qlcnic_cache_tmpl_hdr_values(adapter, fw_dump); 1204 qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
1201 1205
1206 if (fw_dump->use_pex_dma) {
1207 fw_dump->dma_buffer = NULL;
1208 temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
1209 QLC_PEX_DMA_READ_SIZE,
1210 &fw_dump->phys_addr,
1211 GFP_KERNEL);
1212 if (!temp_buffer)
1213 fw_dump->use_pex_dma = false;
1214 else
1215 fw_dump->dma_buffer = temp_buffer;
1216 }
1217
1218
1202 dev_info(&adapter->pdev->dev, 1219 dev_info(&adapter->pdev->dev,
1203 "Default minidump capture mask 0x%x\n", 1220 "Default minidump capture mask 0x%x\n",
1204 fw_dump->cap_mask); 1221 fw_dump->cap_mask);
1205 1222
1206 if (qlcnic_83xx_check(adapter) &&
1207 (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
1208 fw_dump->use_pex_dma = true;
1209 else
1210 fw_dump->use_pex_dma = false;
1211
1212 qlcnic_enable_fw_dump_state(adapter); 1223 qlcnic_enable_fw_dump_state(adapter);
1213 1224
1214 return 0; 1225 return 0;
@@ -1224,7 +1235,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1224 struct device *dev = &adapter->pdev->dev; 1235 struct device *dev = &adapter->pdev->dev;
1225 struct qlcnic_hardware_context *ahw; 1236 struct qlcnic_hardware_context *ahw;
1226 struct qlcnic_dump_entry *entry; 1237 struct qlcnic_dump_entry *entry;
1227 void *temp_buffer, *tmpl_hdr; 1238 void *tmpl_hdr;
1228 u32 ocm_window; 1239 u32 ocm_window;
1229 __le32 *buffer; 1240 __le32 *buffer;
1230 char mesg[64]; 1241 char mesg[64];
@@ -1268,16 +1279,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1268 qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION); 1279 qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
1269 qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version); 1280 qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
1270 1281
1271 if (fw_dump->use_pex_dma) {
1272 temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1273 &fw_dump->phys_addr,
1274 GFP_KERNEL);
1275 if (!temp_buffer)
1276 fw_dump->use_pex_dma = false;
1277 else
1278 fw_dump->dma_buffer = temp_buffer;
1279 }
1280
1281 if (qlcnic_82xx_check(adapter)) { 1282 if (qlcnic_82xx_check(adapter)) {
1282 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); 1283 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
1283 fw_dump_ops = qlcnic_fw_dump_ops; 1284 fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1335,10 +1336,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1335 /* Send a udev event to notify availability of FW dump */ 1336 /* Send a udev event to notify availability of FW dump */
1336 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); 1337 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
1337 1338
1338 if (fw_dump->use_pex_dma)
1339 dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
1340 fw_dump->dma_buffer, fw_dump->phys_addr);
1341
1342 return 0; 1339 return 0;
1343} 1340}
1344 1341
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 396bd1fd1d27..4677b2edccca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
52 QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3, 52 QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
53}; 53};
54 54
55#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
55#define QLC_BC_CMD 1 56#define QLC_BC_CMD 1
56 57
57struct qlcnic_trans_list { 58struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
151 struct qlcnic_trans_list rcv_pend; 152 struct qlcnic_trans_list rcv_pend;
152 struct qlcnic_adapter *adapter; 153 struct qlcnic_adapter *adapter;
153 struct qlcnic_vport *vp; 154 struct qlcnic_vport *vp;
154 struct mutex vlan_list_lock; /* Lock for VLAN list */ 155 spinlock_t vlan_list_lock; /* Lock for VLAN list */
155}; 156};
156 157
157struct qlcnic_async_work_list { 158struct qlcnic_async_work_list {
158 struct list_head list; 159 struct list_head list;
159 struct work_struct work; 160 struct work_struct work;
160 void *ptr; 161 void *ptr;
162 struct qlcnic_cmd_args *cmd;
161}; 163};
162 164
163struct qlcnic_back_channel { 165struct qlcnic_back_channel {
@@ -231,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
231void qlcnic_sriov_pf_reset(struct qlcnic_adapter *); 233void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
232int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *); 234int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
233int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *); 235int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
234int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int); 236int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
235int qlcnic_sriov_get_vf_config(struct net_device *, int , 237int qlcnic_sriov_get_vf_config(struct net_device *, int ,
236 struct ifla_vf_info *); 238 struct ifla_vf_info *);
237int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8); 239int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
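
vlan_list_lock changes from a mutex to a spinlock because the VLAN list is now consulted from contexts that must not sleep, including the Tx path via qlcnic_sriov_check_any_vlan(); most takers below use spin_lock_bh() so a process-context holder cannot deadlock against softirq Tx processing. The discipline, sketched from qlcnic_sriov_check_vlan_id():

/* Sketch: short, non-sleeping critical section with BH disabled. */
spin_lock_bh(&vf->vlan_list_lock);
for (i = 0; i < sriov->num_allowed_vlans; i++) {
	if (vf->sriov_vlans[i] == vlan_id) {
		err = 0;		/* found */
		break;
	}
}
spin_unlock_bh(&vf->vlan_list_lock);
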
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6afe9c1f5ab9..1659c804f1d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
39static void qlcnic_sriov_process_bc_cmd(struct work_struct *); 39static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
40static int qlcnic_sriov_vf_shutdown(struct pci_dev *); 40static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
41static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *); 41static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
42static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
43 struct qlcnic_cmd_args *);
42 44
43static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { 45static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
44 .read_crb = qlcnic_83xx_read_crb, 46 .read_crb = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
181 vf->adapter = adapter; 183 vf->adapter = adapter;
182 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); 184 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
183 mutex_init(&vf->send_cmd_lock); 185 mutex_init(&vf->send_cmd_lock);
184 mutex_init(&vf->vlan_list_lock); 186 spin_lock_init(&vf->vlan_list_lock);
185 INIT_LIST_HEAD(&vf->rcv_act.wait_list); 187 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
186 INIT_LIST_HEAD(&vf->rcv_pend.wait_list); 188 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
187 spin_lock_init(&vf->rcv_act.lock); 189 spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
197 goto qlcnic_destroy_async_wq; 199 goto qlcnic_destroy_async_wq;
198 } 200 }
199 sriov->vf_info[i].vp = vp; 201 sriov->vf_info[i].vp = vp;
202 vp->vlan_mode = QLC_GUEST_VLAN_MODE;
200 vp->max_tx_bw = MAX_BW; 203 vp->max_tx_bw = MAX_BW;
201 vp->spoofchk = true; 204 vp->min_tx_bw = MIN_BW;
205 vp->spoofchk = false;
202 random_ether_addr(vp->mac); 206 random_ether_addr(vp->mac);
203 dev_info(&adapter->pdev->dev, 207 dev_info(&adapter->pdev->dev,
204 "MAC Address %pM is configured for VF %d\n", 208 "MAC Address %pM is configured for VF %d\n",
@@ -454,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
454 struct qlcnic_cmd_args cmd; 458 struct qlcnic_cmd_args cmd;
455 int ret = 0; 459 int ret = 0;
456 460
461 memset(&cmd, 0, sizeof(cmd));
457 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); 462 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
458 if (ret) 463 if (ret)
459 return ret; 464 return ret;
@@ -515,6 +520,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
515{ 520{
516 int err; 521 int err;
517 522
523 adapter->flags |= QLCNIC_VLAN_FILTERING;
524 adapter->ahw->total_nic_func = 1;
518 INIT_LIST_HEAD(&adapter->vf_mc_list); 525 INIT_LIST_HEAD(&adapter->vf_mc_list);
519 if (!qlcnic_use_msi_x && !!qlcnic_use_msi) 526 if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
520 dev_warn(&adapter->pdev->dev, 527 dev_warn(&adapter->pdev->dev,
@@ -770,6 +777,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
770 cmd->req.arg = (u32 *)trans->req_pay; 777 cmd->req.arg = (u32 *)trans->req_pay;
771 cmd->rsp.arg = (u32 *)trans->rsp_pay; 778 cmd->rsp.arg = (u32 *)trans->rsp_pay;
772 cmd_op = cmd->req.arg[0] & 0xff; 779 cmd_op = cmd->req.arg[0] & 0xff;
780 cmd->cmd_op = cmd_op;
773 remainder = (trans->rsp_pay_size) % (bc_pay_sz); 781 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
774 num_frags = (trans->rsp_pay_size) / (bc_pay_sz); 782 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
775 if (remainder) 783 if (remainder)
@@ -1356,7 +1364,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1356 return -EIO; 1364 return -EIO;
1357} 1365}
1358 1366
1359static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, 1367static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1360 struct qlcnic_cmd_args *cmd) 1368 struct qlcnic_cmd_args *cmd)
1361{ 1369{
1362 struct qlcnic_hardware_context *ahw = adapter->ahw; 1370 struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1408,12 +1416,17 @@ retry:
1408 (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) { 1416 (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1409 rsp = QLCNIC_RCODE_SUCCESS; 1417 rsp = QLCNIC_RCODE_SUCCESS;
1410 } else { 1418 } else {
1411 rsp = mbx_err_code; 1419 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1412 if (!rsp) 1420 rsp = QLCNIC_RCODE_SUCCESS;
1413 rsp = 1; 1421 } else {
1414 dev_err(dev, 1422 rsp = mbx_err_code;
1415 "MBX command 0x%x failed with err:0x%x for VF %d\n", 1423 if (!rsp)
1416 opcode, mbx_err_code, func); 1424 rsp = 1;
1425
1426 dev_err(dev,
1427 "MBX command 0x%x failed with err:0x%x for VF %d\n",
1428 opcode, mbx_err_code, func);
1429 }
1417 } 1430 }
1418 1431
1419err_out: 1432err_out:
@@ -1435,12 +1448,23 @@ free_cmd:
1435 return rsp; 1448 return rsp;
1436} 1449}
1437 1450
1451
1452static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1453 struct qlcnic_cmd_args *cmd)
1454{
1455 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
1456 return qlcnic_sriov_async_issue_cmd(adapter, cmd);
1457 else
1458 return __qlcnic_sriov_issue_cmd(adapter, cmd);
1459}
1460
1438static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op) 1461static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1439{ 1462{
1440 struct qlcnic_cmd_args cmd; 1463 struct qlcnic_cmd_args cmd;
1441 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0]; 1464 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1442 int ret; 1465 int ret;
1443 1466
1467 memset(&cmd, 0, sizeof(cmd));
1444 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op)) 1468 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1445 return -ENOMEM; 1469 return -ENOMEM;
1446 1470
@@ -1465,58 +1489,28 @@ out:
1465 return ret; 1489 return ret;
1466} 1490}
1467 1491
1468static void qlcnic_vf_add_mc_list(struct net_device *netdev) 1492static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
1469{ 1493{
1470 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1494 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1471 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 1495 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1472 struct qlcnic_mac_vlan_list *cur;
1473 struct list_head *head, tmp_list;
1474 struct qlcnic_vf_info *vf; 1496 struct qlcnic_vf_info *vf;
1475 u16 vlan_id; 1497 u16 vlan_id;
1476 int i; 1498 int i;
1477 1499
1478 static const u8 bcast_addr[ETH_ALEN] = {
1479 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1480 };
1481
1482 vf = &adapter->ahw->sriov->vf_info[0]; 1500 vf = &adapter->ahw->sriov->vf_info[0];
1483 INIT_LIST_HEAD(&tmp_list);
1484 head = &adapter->vf_mc_list;
1485 netif_addr_lock_bh(netdev);
1486 1501
1487 while (!list_empty(head)) { 1502 if (!qlcnic_sriov_check_any_vlan(vf)) {
1488 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); 1503 qlcnic_nic_add_mac(adapter, mac, 0);
1489 list_move(&cur->list, &tmp_list); 1504 } else {
1490 } 1505 spin_lock(&vf->vlan_list_lock);
1491 1506 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1492 netif_addr_unlock_bh(netdev); 1507 vlan_id = vf->sriov_vlans[i];
1493 1508 if (vlan_id)
1494 while (!list_empty(&tmp_list)) { 1509 qlcnic_nic_add_mac(adapter, mac, vlan_id);
1495 cur = list_entry((&tmp_list)->next,
1496 struct qlcnic_mac_vlan_list, list);
1497 if (!qlcnic_sriov_check_any_vlan(vf)) {
1498 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1499 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1500 } else {
1501 mutex_lock(&vf->vlan_list_lock);
1502 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1503 vlan_id = vf->sriov_vlans[i];
1504 if (vlan_id) {
1505 qlcnic_nic_add_mac(adapter, bcast_addr,
1506 vlan_id);
1507 qlcnic_nic_add_mac(adapter,
1508 cur->mac_addr,
1509 vlan_id);
1510 }
1511 }
1512 mutex_unlock(&vf->vlan_list_lock);
1513 if (qlcnic_84xx_check(adapter)) {
1514 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1515 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1516 }
1517 } 1510 }
1518 list_del(&cur->list); 1511 spin_unlock(&vf->vlan_list_lock);
1519 kfree(cur); 1512 if (qlcnic_84xx_check(adapter))
1513 qlcnic_nic_add_mac(adapter, mac, 0);
1520 } 1514 }
1521} 1515}
1522 1516
@@ -1525,6 +1519,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1525 struct list_head *head = &bc->async_list; 1519 struct list_head *head = &bc->async_list;
1526 struct qlcnic_async_work_list *entry; 1520 struct qlcnic_async_work_list *entry;
1527 1521
1522 flush_workqueue(bc->bc_async_wq);
1528 while (!list_empty(head)) { 1523 while (!list_empty(head)) {
1529 entry = list_entry(head->next, struct qlcnic_async_work_list, 1524 entry = list_entry(head->next, struct qlcnic_async_work_list,
1530 list); 1525 list);
@@ -1534,10 +1529,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1534 } 1529 }
1535} 1530}
1536 1531
1537static void qlcnic_sriov_vf_set_multi(struct net_device *netdev) 1532void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1538{ 1533{
1539 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1534 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1540 struct qlcnic_hardware_context *ahw = adapter->ahw; 1535 struct qlcnic_hardware_context *ahw = adapter->ahw;
1536 static const u8 bcast_addr[ETH_ALEN] = {
1537 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1538 };
1539 struct netdev_hw_addr *ha;
1541 u32 mode = VPORT_MISS_MODE_DROP; 1540 u32 mode = VPORT_MISS_MODE_DROP;
1542 1541
1543 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 1542 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1549,23 +1548,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1549 } else if ((netdev->flags & IFF_ALLMULTI) || 1548 } else if ((netdev->flags & IFF_ALLMULTI) ||
1550 (netdev_mc_count(netdev) > ahw->max_mc_count)) { 1549 (netdev_mc_count(netdev) > ahw->max_mc_count)) {
1551 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 1550 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1551 } else {
1552 qlcnic_vf_add_mc_list(netdev, bcast_addr);
1553 if (!netdev_mc_empty(netdev)) {
1554 netdev_for_each_mc_addr(ha, netdev)
1555 qlcnic_vf_add_mc_list(netdev, ha->addr);
1556 }
1552 } 1557 }
1553 1558
1554 if (qlcnic_sriov_vf_check(adapter)) 1559 /* configure unicast MAC address, if there is not sufficient space
1555 qlcnic_vf_add_mc_list(netdev); 1560 * to store all the unicast addresses then enable promiscuous mode
1561 */
1562 if (netdev_uc_count(netdev) > ahw->max_uc_count) {
1563 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1564 } else if (!netdev_uc_empty(netdev)) {
1565 netdev_for_each_uc_addr(ha, netdev)
1566 qlcnic_vf_add_mc_list(netdev, ha->addr);
1567 }
1568
1569 if (adapter->pdev->is_virtfn) {
1570 if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
1571 !adapter->fdb_mac_learn) {
1572 qlcnic_alloc_lb_filters_mem(adapter);
1573 adapter->drv_mac_learn = 1;
1574 adapter->rx_mac_learn = true;
1575 } else {
1576 adapter->drv_mac_learn = 0;
1577 adapter->rx_mac_learn = false;
1578 }
1579 }
1556 1580
1557 qlcnic_nic_set_promisc(adapter, mode); 1581 qlcnic_nic_set_promisc(adapter, mode);
1558} 1582}
1559 1583
1560static void qlcnic_sriov_handle_async_multi(struct work_struct *work) 1584static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
1561{ 1585{
1562 struct qlcnic_async_work_list *entry; 1586 struct qlcnic_async_work_list *entry;
1563 struct net_device *netdev; 1587 struct qlcnic_adapter *adapter;
1588 struct qlcnic_cmd_args *cmd;
1564 1589
1565 entry = container_of(work, struct qlcnic_async_work_list, work); 1590 entry = container_of(work, struct qlcnic_async_work_list, work);
1566 netdev = (struct net_device *)entry->ptr; 1591 adapter = entry->ptr;
1567 1592 cmd = entry->cmd;
1568 qlcnic_sriov_vf_set_multi(netdev); 1593 __qlcnic_sriov_issue_cmd(adapter, cmd);
1569 return; 1594 return;
1570} 1595}
1571 1596
@@ -1595,8 +1620,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1595 return entry; 1620 return entry;
1596} 1621}
1597 1622
1598static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc, 1623static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
1599 work_func_t func, void *data) 1624 work_func_t func, void *data,
1625 struct qlcnic_cmd_args *cmd)
1600{ 1626{
1601 struct qlcnic_async_work_list *entry = NULL; 1627 struct qlcnic_async_work_list *entry = NULL;
1602 1628
@@ -1605,21 +1631,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1605 return; 1631 return;
1606 1632
1607 entry->ptr = data; 1633 entry->ptr = data;
1634 entry->cmd = cmd;
1608 INIT_WORK(&entry->work, func); 1635 INIT_WORK(&entry->work, func);
1609 queue_work(bc->bc_async_wq, &entry->work); 1636 queue_work(bc->bc_async_wq, &entry->work);
1610} 1637}
1611 1638
1612void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev) 1639static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1640 struct qlcnic_cmd_args *cmd)
1613{ 1641{
1614 1642
1615 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1616 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; 1643 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1617 1644
1618 if (adapter->need_fw_reset) 1645 if (adapter->need_fw_reset)
1619 return; 1646 return -EIO;
1620 1647
1621 qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi, 1648 qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
1622 netdev); 1649 adapter, cmd);
1650 return 0;
1623} 1651}
1624 1652
1625static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) 1653static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1843,6 +1871,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1843 return 0; 1871 return 0;
1844} 1872}
1845 1873
1874static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
1875{
1876 if (adapter->fhash.fnum)
1877 qlcnic_prune_lb_filters(adapter);
1878}
1879
1846static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work) 1880static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1847{ 1881{
1848 struct qlcnic_adapter *adapter; 1882 struct qlcnic_adapter *adapter;
@@ -1874,6 +1908,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1874 } 1908 }
1875 1909
1876 idc->prev_state = idc->curr_state; 1910 idc->prev_state = idc->curr_state;
1911 qlcnic_sriov_vf_periodic_tasks(adapter);
1912
1877 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status)) 1913 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1878 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 1914 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1879 idc->delay); 1915 idc->delay);
@@ -1897,7 +1933,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1897 if (!vf->sriov_vlans) 1933 if (!vf->sriov_vlans)
1898 return err; 1934 return err;
1899 1935
1900 mutex_lock(&vf->vlan_list_lock); 1936 spin_lock_bh(&vf->vlan_list_lock);
1901 1937
1902 for (i = 0; i < sriov->num_allowed_vlans; i++) { 1938 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1903 if (vf->sriov_vlans[i] == vlan_id) { 1939 if (vf->sriov_vlans[i] == vlan_id) {
@@ -1906,7 +1942,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1906 } 1942 }
1907 } 1943 }
1908 1944
1909 mutex_unlock(&vf->vlan_list_lock); 1945 spin_unlock_bh(&vf->vlan_list_lock);
1910 return err; 1946 return err;
1911} 1947}
1912 1948
@@ -1915,12 +1951,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1915{ 1951{
1916 int err = 0; 1952 int err = 0;
1917 1953
1918 mutex_lock(&vf->vlan_list_lock); 1954 spin_lock_bh(&vf->vlan_list_lock);
1919 1955
1920 if (vf->num_vlan >= sriov->num_allowed_vlans) 1956 if (vf->num_vlan >= sriov->num_allowed_vlans)
1921 err = -EINVAL; 1957 err = -EINVAL;
1922 1958
1923 mutex_unlock(&vf->vlan_list_lock); 1959 spin_unlock_bh(&vf->vlan_list_lock);
1924 return err; 1960 return err;
1925} 1961}
1926 1962
@@ -1973,7 +2009,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1973 if (!vf->sriov_vlans) 2009 if (!vf->sriov_vlans)
1974 return; 2010 return;
1975 2011
1976 mutex_lock(&vf->vlan_list_lock); 2012 spin_lock_bh(&vf->vlan_list_lock);
1977 2013
1978 switch (opcode) { 2014 switch (opcode) {
1979 case QLC_VLAN_ADD: 2015 case QLC_VLAN_ADD:
@@ -1986,7 +2022,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1986 netdev_err(adapter->netdev, "Invalid VLAN operation\n"); 2022 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
1987 } 2023 }
1988 2024
1989 mutex_unlock(&vf->vlan_list_lock); 2025 spin_unlock_bh(&vf->vlan_list_lock);
1990 return; 2026 return;
1991} 2027}
1992 2028
@@ -1994,10 +2030,12 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1994 u16 vid, u8 enable) 2030 u16 vid, u8 enable)
1995{ 2031{
1996 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 2032 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2033 struct net_device *netdev = adapter->netdev;
1997 struct qlcnic_vf_info *vf; 2034 struct qlcnic_vf_info *vf;
1998 struct qlcnic_cmd_args cmd; 2035 struct qlcnic_cmd_args cmd;
1999 int ret; 2036 int ret;
2000 2037
2038 memset(&cmd, 0, sizeof(cmd));
2001 if (vid == 0) 2039 if (vid == 0)
2002 return 0; 2040 return 0;
2003 2041
@@ -2019,14 +2057,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
2019 dev_err(&adapter->pdev->dev, 2057 dev_err(&adapter->pdev->dev,
2020 "Failed to configure guest VLAN, err=%d\n", ret); 2058 "Failed to configure guest VLAN, err=%d\n", ret);
2021 } else { 2059 } else {
2060 netif_addr_lock_bh(netdev);
2022 qlcnic_free_mac_list(adapter); 2061 qlcnic_free_mac_list(adapter);
2062 netif_addr_unlock_bh(netdev);
2023 2063
2024 if (enable) 2064 if (enable)
2025 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD); 2065 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
2026 else 2066 else
2027 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE); 2067 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
2028 2068
2029 qlcnic_set_multi(adapter->netdev); 2069 netif_addr_lock_bh(netdev);
2070 qlcnic_set_multi(netdev);
2071 netif_addr_unlock_bh(netdev);
2030 } 2072 }
2031 2073
2032 qlcnic_free_mbx_args(&cmd); 2074 qlcnic_free_mbx_args(&cmd);
@@ -2157,11 +2199,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2157{ 2199{
2158 bool err = false; 2200 bool err = false;
2159 2201
2160 mutex_lock(&vf->vlan_list_lock); 2202 spin_lock_bh(&vf->vlan_list_lock);
2161 2203
2162 if (vf->num_vlan) 2204 if (vf->num_vlan)
2163 err = true; 2205 err = true;
2164 2206
2165 mutex_unlock(&vf->vlan_list_lock); 2207 spin_unlock_bh(&vf->vlan_list_lock);
2166 return err; 2208 return err;
2167} 2209}
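
The async rework above generalizes what used to be a multicast-only deferral: any mailbox command tagged QLC_83XX_MBX_CMD_NO_WAIT is queued onto the back-channel workqueue and issued later by qlcnic_sriov_handle_async_issue_cmd(), while everything else still goes through __qlcnic_sriov_issue_cmd() synchronously; qlcnic_sriov_cleanup_async_list() gains a flush_workqueue() so teardown waits out in-flight work. The dispatch, condensed:

/* Sketch: fire-and-forget vs. wait-for-reply mailbox dispatch. */
if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
	return qlcnic_sriov_async_issue_cmd(adapter, cmd);	/* queue_work() */

return __qlcnic_sriov_issue_cmd(adapter, cmd);			/* synchronous */
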
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 280137991544..a29538b86edf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -16,6 +16,7 @@
16#define QLC_VF_FLOOD_BIT BIT_16 16#define QLC_VF_FLOOD_BIT BIT_16
17#define QLC_FLOOD_MODE 0x5 17#define QLC_FLOOD_MODE 0x5
18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19 18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
19#define QLC_INTR_COAL_TYPE_MASK 0x7
19 20
20static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); 21static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
21 22
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
83 info->max_tx_ques = res->num_tx_queues / max; 84 info->max_tx_ques = res->num_tx_queues / max;
84 85
85 if (qlcnic_83xx_pf_check(adapter)) 86 if (qlcnic_83xx_pf_check(adapter))
86 num_macs = 1; 87 num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
87 88
88 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; 89 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
89 90
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
337 338
338 cmd.req.arg[1] = 0x4; 339 cmd.req.arg[1] = 0x4;
339 if (enable) { 340 if (enable) {
341 adapter->flags |= QLCNIC_VLAN_FILTERING;
340 cmd.req.arg[1] |= BIT_16; 342 cmd.req.arg[1] |= BIT_16;
341 if (qlcnic_84xx_check(adapter)) 343 if (qlcnic_84xx_check(adapter))
342 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0; 344 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
345 } else {
346 adapter->flags &= ~QLCNIC_VLAN_FILTERING;
343 } 347 }
344 348
345 err = qlcnic_issue_cmd(adapter, &cmd); 349 err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
471 return -EPERM; 475 return -EPERM;
472 } 476 }
473 477
478 qlcnic_sriov_pf_disable(adapter);
479
474 rtnl_lock(); 480 rtnl_lock();
475 if (netif_running(netdev)) 481 if (netif_running(netdev))
476 __qlcnic_down(adapter, netdev); 482 __qlcnic_down(adapter, netdev);
477 483
478 qlcnic_sriov_pf_disable(adapter);
479
480 qlcnic_sriov_free_vlans(adapter); 484 qlcnic_sriov_free_vlans(adapter);
481 485
482 qlcnic_sriov_pf_cleanup(adapter); 486 qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
595 599
596 qlcnic_sriov_alloc_vlans(adapter); 600 qlcnic_sriov_alloc_vlans(adapter);
597 601
598 err = qlcnic_sriov_pf_enable(adapter, num_vfs);
599 return err; 602 return err;
600 603
601del_flr_queue: 604del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
626 __qlcnic_down(adapter, netdev); 629 __qlcnic_down(adapter, netdev);
627 630
628 err = __qlcnic_pci_sriov_enable(adapter, num_vfs); 631 err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
629 if (err) { 632 if (err)
630 netdev_info(netdev, "Failed to enable SR-IOV on port %d\n", 633 goto error;
631 adapter->portnum);
632 634
633 err = -EIO; 635 if (netif_running(netdev))
634 if (qlcnic_83xx_configure_opmode(adapter)) 636 __qlcnic_up(adapter, netdev);
635 goto error; 637
636 } else { 638 rtnl_unlock();
639 err = qlcnic_sriov_pf_enable(adapter, num_vfs);
640 if (!err) {
637 netdev_info(netdev, 641 netdev_info(netdev,
638 "SR-IOV is enabled successfully on port %d\n", 642 "SR-IOV is enabled successfully on port %d\n",
639 adapter->portnum); 643 adapter->portnum);
640 /* Return number of vfs enabled */ 644 /* Return number of vfs enabled */
641 err = num_vfs; 645 return num_vfs;
642 } 646 }
647
648 rtnl_lock();
643 if (netif_running(netdev)) 649 if (netif_running(netdev))
644 __qlcnic_up(adapter, netdev); 650 __qlcnic_down(adapter, netdev);
645 651
646error: 652error:
653 if (!qlcnic_83xx_configure_opmode(adapter)) {
654 if (netif_running(netdev))
655 __qlcnic_up(adapter, netdev);
656 }
657
647 rtnl_unlock(); 658 rtnl_unlock();
659 netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
660 adapter->portnum);
661
648 return err; 662 return err;
649} 663}
650 664
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
773 struct qlcnic_vf_info *vf, 787 struct qlcnic_vf_info *vf,
774 u16 vlan, u8 op) 788 u16 vlan, u8 op)
775{ 789{
776 struct qlcnic_cmd_args cmd; 790 struct qlcnic_cmd_args *cmd;
777 struct qlcnic_macvlan_mbx mv; 791 struct qlcnic_macvlan_mbx mv;
778 struct qlcnic_vport *vp; 792 struct qlcnic_vport *vp;
779 u8 *addr; 793 u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
783 797
784 vp = vf->vp; 798 vp = vf->vp;
785 799
786 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN)) 800 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
801 if (!cmd)
787 return -ENOMEM; 802 return -ENOMEM;
788 803
804 err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
805 if (err)
806 goto free_cmd;
807
808 cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
789 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); 809 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
790 if (vpid < 0) { 810 if (vpid < 0) {
791 err = -EINVAL; 811 err = -EINVAL;
792 goto out; 812 goto free_args;
793 } 813 }
794 814
795 if (vlan) 815 if (vlan)
796 op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 816 op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
797 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL); 817 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
798 818
799 cmd.req.arg[1] = op | (1 << 8) | (3 << 6); 819 cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
800 cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31; 820 cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
801 821
802 addr = vp->mac; 822 addr = vp->mac;
803 mv.vlan = vlan; 823 mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
807 mv.mac_addr3 = addr[3]; 827 mv.mac_addr3 = addr[3];
808 mv.mac_addr4 = addr[4]; 828 mv.mac_addr4 = addr[4];
809 mv.mac_addr5 = addr[5]; 829 mv.mac_addr5 = addr[5];
810 buf = &cmd.req.arg[2]; 830 buf = &cmd->req.arg[2];
811 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); 831 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
812 832
813 err = qlcnic_issue_cmd(adapter, &cmd); 833 err = qlcnic_issue_cmd(adapter, cmd);
814 834
815 if (err) 835 if (!err)
816 dev_err(&adapter->pdev->dev, 836 return err;
817 "MAC-VLAN %s to CAM failed, err=%d.\n",
818 ((op == 1) ? "add " : "delete "), err);
819 837
820out: 838free_args:
821 qlcnic_free_mbx_args(&cmd); 839 qlcnic_free_mbx_args(cmd);
840free_cmd:
841 kfree(cmd);
822 return err; 842 return err;
823} 843}
824 844
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
840 860
841 sriov = adapter->ahw->sriov; 861 sriov = adapter->ahw->sriov;
842 862
843 mutex_lock(&vf->vlan_list_lock); 863 spin_lock_bh(&vf->vlan_list_lock);
844 if (vf->num_vlan) { 864 if (vf->num_vlan) {
845 for (i = 0; i < sriov->num_allowed_vlans; i++) { 865 for (i = 0; i < sriov->num_allowed_vlans; i++) {
846 vlan = vf->sriov_vlans[i]; 866 vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
849 opcode); 869 opcode);
850 } 870 }
851 } 871 }
852 mutex_unlock(&vf->vlan_list_lock); 872 spin_unlock_bh(&vf->vlan_list_lock);
853 873
854 if (vf->vp->vlan_mode != QLC_PVID_MODE) { 874 if (vf->vp->vlan_mode != QLC_PVID_MODE) {
855 if (qlcnic_83xx_pf_check(adapter) && 875 if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
1178{ 1198{
1179 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; 1199 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
1180 u16 ctx_id, pkts, time; 1200 u16 ctx_id, pkts, time;
1201 int err = -EINVAL;
1202 u8 type;
1181 1203
1204 type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
1182 ctx_id = cmd->req.arg[1] >> 16; 1205 ctx_id = cmd->req.arg[1] >> 16;
1183 pkts = cmd->req.arg[2] & 0xffff; 1206 pkts = cmd->req.arg[2] & 0xffff;
1184 time = cmd->req.arg[2] >> 16; 1207 time = cmd->req.arg[2] >> 16;
1185 1208
1186 if (ctx_id != vf->rx_ctx_id) 1209 switch (type) {
1187 return -EINVAL; 1210 case QLCNIC_INTR_COAL_TYPE_RX:
1188 if (pkts > coal->rx_packets) 1211 if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
1189 return -EINVAL; 1212 time < coal->rx_time_us)
1190 if (time < coal->rx_time_us) 1213 goto err_label;
1191 return -EINVAL; 1214 break;
1215 case QLCNIC_INTR_COAL_TYPE_TX:
1216 if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
1217 time < coal->tx_time_us)
1218 goto err_label;
1219 break;
1220 default:
1221 netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
1222 type);
1223 return err;
1224 }
1192 1225
1193 return 0; 1226 return 0;
1227
1228err_label:
1229 netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
1230 vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
1231 vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
1232 netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
1233 ctx_id, pkts, time, type);
1234
1235 return err;
1194} 1236}
1195 1237
1196static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran, 1238static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
1214 struct qlcnic_vf_info *vf, 1256 struct qlcnic_vf_info *vf,
1215 struct qlcnic_cmd_args *cmd) 1257 struct qlcnic_cmd_args *cmd)
1216{ 1258{
1217 struct qlcnic_macvlan_mbx *macvlan;
1218 struct qlcnic_vport *vp = vf->vp; 1259 struct qlcnic_vport *vp = vf->vp;
1219 u8 op, new_op; 1260 u8 op, new_op;
1220 1261
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
1224 cmd->req.arg[1] |= (vf->vp->handle << 16); 1265 cmd->req.arg[1] |= (vf->vp->handle << 16);
1225 cmd->req.arg[1] |= BIT_31; 1266 cmd->req.arg[1] |= BIT_31;
1226 1267
1227 macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
1228 if (!(macvlan->mac_addr0 & BIT_0)) {
1229 dev_err(&adapter->pdev->dev,
1230 "MAC address change is not allowed from VF %d",
1231 vf->pci_func);
1232 return -EINVAL;
1233 }
1234
1235 if (vp->vlan_mode == QLC_PVID_MODE) { 1268 if (vp->vlan_mode == QLC_PVID_MODE) {
1236 op = cmd->req.arg[1] & 0x7; 1269 op = cmd->req.arg[1] & 0x7;
1237 cmd->req.arg[1] &= ~0x7; 1270 cmd->req.arg[1] &= ~0x7;
@@ -1815,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1815 return 0; 1848 return 0;
1816} 1849}
1817 1850
1818int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate) 1851int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
1852 int min_tx_rate, int max_tx_rate)
1819{ 1853{
1820 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1854 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1821 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 1855 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1830,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
1830 if (vf >= sriov->num_vfs) 1864 if (vf >= sriov->num_vfs)
1831 return -EINVAL; 1865 return -EINVAL;
1832 1866
1833 if (tx_rate >= 10000 || tx_rate < 100) { 1867 vf_info = &sriov->vf_info[vf];
1868 vp = vf_info->vp;
1869 vpid = vp->handle;
1870
1871 if (!min_tx_rate)
1872 min_tx_rate = QLC_VF_MIN_TX_RATE;
1873
1874 if (max_tx_rate &&
1875 (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
1834 netdev_err(netdev, 1876 netdev_err(netdev,
1835 "Invalid Tx rate, allowed range is [%d - %d]", 1877 "Invalid max Tx rate, allowed range is [%d - %d]",
1836 QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE); 1878 min_tx_rate, QLC_VF_MAX_TX_RATE);
1837 return -EINVAL; 1879 return -EINVAL;
1838 } 1880 }
1839 1881
1840 if (tx_rate == 0) 1882 if (!max_tx_rate)
1841 tx_rate = 10000; 1883 max_tx_rate = 10000;
1842 1884
1843 vf_info = &sriov->vf_info[vf]; 1885 if (min_tx_rate &&
1844 vp = vf_info->vp; 1886 (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
1845 vpid = vp->handle; 1887 netdev_err(netdev,
1888 "Invalid min Tx rate, allowed range is [%d - %d]",
1889 QLC_VF_MIN_TX_RATE, max_tx_rate);
1890 return -EINVAL;
1891 }
1846 1892
1847 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { 1893 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
1848 if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid)) 1894 if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
1849 return -EIO; 1895 return -EIO;
1850 1896
1851 nic_info.max_tx_bw = tx_rate / 100; 1897 nic_info.max_tx_bw = max_tx_rate / 100;
1898 nic_info.min_tx_bw = min_tx_rate / 100;
1852 nic_info.bit_offsets = BIT_0; 1899 nic_info.bit_offsets = BIT_0;
1853 1900
1854 if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid)) 1901 if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
1855 return -EIO; 1902 return -EIO;
1856 } 1903 }
1857 1904
1858 vp->max_tx_bw = tx_rate / 100; 1905 vp->max_tx_bw = max_tx_rate / 100;
1859 netdev_info(netdev, 1906 netdev_info(netdev,
1860 "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", 1907 "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
1861 tx_rate, vp->max_tx_bw, vf); 1908 max_tx_rate, vp->max_tx_bw, vf);
1909 vp->min_tx_bw = min_tx_rate / 100;
1910 netdev_info(netdev,
1911 "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
1912 min_tx_rate, vp->min_tx_bw, vf);
1862 return 0; 1913 return 0;
1863} 1914}
1864 1915
@@ -1957,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1957 ivi->qos = vp->qos; 2008 ivi->qos = vp->qos;
1958 ivi->spoofchk = vp->spoofchk; 2009 ivi->spoofchk = vp->spoofchk;
1959 if (vp->max_tx_bw == MAX_BW) 2010 if (vp->max_tx_bw == MAX_BW)
1960 ivi->tx_rate = 0; 2011 ivi->max_tx_rate = 0;
2012 else
2013 ivi->max_tx_rate = vp->max_tx_bw * 100;
2014 if (vp->min_tx_bw == MIN_BW)
2015 ivi->min_tx_rate = 0;
1961 else 2016 else
1962 ivi->tx_rate = vp->max_tx_bw * 100; 2017 ivi->min_tx_rate = vp->min_tx_bw * 100;
1963 2018
1964 ivi->vf = vf; 2019 ivi->vf = vf;
1965 return 0; 2020 return 0;
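
For reference, the rate hunks above track the then-new ndo_set_vf_rate hook,
which replaces the single-argument ndo_set_vf_tx_rate with a min/max pair.
A minimal sketch of the wiring (the ops table name is illustrative and
abbreviated, not the driver's full table):

	/* sketch: the hook's prototype is
	 *   int (*ndo_set_vf_rate)(struct net_device *dev, int vf,
	 *                          int min_tx_rate, int max_tx_rate);
	 */
	static const struct net_device_ops qlcnic_sriov_pf_ops_sketch = {
		.ndo_set_vf_rate   = qlcnic_sriov_set_vf_tx_rate,
		.ndo_get_vf_config = qlcnic_sriov_get_vf_config,
	};
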
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index cd346e27f2e1..f5786d5792df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -19,6 +19,10 @@
19#include <linux/sysfs.h> 19#include <linux/sysfs.h>
20#include <linux/aer.h> 20#include <linux/aer.h>
21#include <linux/log2.h> 21#include <linux/log2.h>
22#ifdef CONFIG_QLCNIC_HWMON
23#include <linux/hwmon.h>
24#include <linux/hwmon-sysfs.h>
25#endif
22 26
23#define QLC_STATUS_UNSUPPORTED_CMD -2 27#define QLC_STATUS_UNSUPPORTED_CMD -2
24 28
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
358 if (adapter->npars[i].pci_func == pci_func) 362 if (adapter->npars[i].pci_func == pci_func)
359 return i; 363 return i;
360 } 364 }
365
366 dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
361 return -EINVAL; 367 return -EINVAL;
362} 368}
363 369
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
1243 .write = qlcnic_83xx_sysfs_flash_write_handler, 1249 .write = qlcnic_83xx_sysfs_flash_write_handler,
1244}; 1250};
1245 1251
1252#ifdef CONFIG_QLCNIC_HWMON
1253
1254static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
1255 struct device_attribute *dev_attr,
1256 char *buf)
1257{
1258 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
1259 unsigned int temperature = 0, value = 0;
1260
1261 if (qlcnic_83xx_check(adapter))
1262 value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
1263 else if (qlcnic_82xx_check(adapter))
1264 value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
1265
1266 temperature = qlcnic_get_temp_val(value);
1267 /* display millidegree Celsius */
1268 temperature *= 1000;
1269 return sprintf(buf, "%u\n", temperature);
1270}
1271
1272/* hwmon-sysfs attributes */
1273static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
1274 qlcnic_hwmon_show_temp, NULL, 1);
1275
1276static struct attribute *qlcnic_hwmon_attrs[] = {
1277 &sensor_dev_attr_temp1_input.dev_attr.attr,
1278 NULL
1279};
1280
1281ATTRIBUTE_GROUPS(qlcnic_hwmon);
1282
1283void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
1284{
1285 struct device *dev = &adapter->pdev->dev;
1286 struct device *hwmon_dev;
1287
1288 /* Skip hwmon registration for a VF device */
1289 if (qlcnic_sriov_vf_check(adapter)) {
1290 adapter->ahw->hwmon_dev = NULL;
1291 return;
1292 }
1293 hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
1294 adapter,
1295 qlcnic_hwmon_groups);
1296 if (IS_ERR(hwmon_dev)) {
1297 dev_err(dev, "Cannot register with hwmon, err=%ld\n",
1298 PTR_ERR(hwmon_dev));
1299 hwmon_dev = NULL;
1300 }
1301 adapter->ahw->hwmon_dev = hwmon_dev;
1302}
1303
1304void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
1305{
1306 struct device *hwmon_dev = adapter->ahw->hwmon_dev;
1307 if (hwmon_dev) {
1308 hwmon_device_unregister(hwmon_dev);
1309 adapter->ahw->hwmon_dev = NULL;
1310 }
1311}
1312#endif
1313
1246void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) 1314void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
1247{ 1315{
1248 struct device *dev = &adapter->pdev->dev; 1316 struct device *dev = &adapter->pdev->dev;
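
Once registered, the sensor shows up in the standard hwmon sysfs tree. A
minimal userspace sketch for reading it, assuming the adapter came up as
hwmon0 (the index depends on probe order, so resolve it via the device's
name attribute in practice):

	#include <stdio.h>

	int main(void)
	{
		/* hwmon0 is an assumption, not a guaranteed path */
		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
		long mdeg;

		if (!f)
			return 1;
		if (fscanf(f, "%ld", &mdeg) != 1) {
			fclose(f);
			return 1;
		}
		printf("ASIC temp: %ld.%03ld C\n", mdeg / 1000, mdeg % 1000);
		fclose(f);
		return 0;
	}
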
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 0a1d76acab81..b40050e03a56 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
3595 } 3595 }
3596 return status; 3596 return status;
3597err_irq: 3597err_irq:
3598 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); 3598 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3599 ql_free_irq(qdev); 3599 ql_free_irq(qdev);
3600 return status; 3600 return status;
3601} 3601}
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
4770 ndev->irq = pdev->irq; 4770 ndev->irq = pdev->irq;
4771 4771
4772 ndev->netdev_ops = &qlge_netdev_ops; 4772 ndev->netdev_ops = &qlge_netdev_ops;
4773 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); 4773 ndev->ethtool_ops = &qlge_ethtool_ops;
4774 ndev->watchdog_timeo = 10 * HZ; 4774 ndev->watchdog_timeo = 10 * HZ;
4775 4775
4776 err = register_netdev(ndev); 4776 err = register_netdev(ndev);
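
This file and several below make the same mechanical substitution: the
SET_ETHTOOL_OPS() wrapper was dropped tree-wide in this window in favour of
assigning ethtool_ops directly. The removed macro was a trivial one-liner,
roughly:

	/* former definition in <linux/netdevice.h>, for reference */
	#define SET_ETHTOOL_OPS(netdev, ops) \
		((netdev)->ethtool_ops = (ops))
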
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index aa1c079f231d..be425ad5e824 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7125 for (i = 0; i < ETH_ALEN; i++) 7125 for (i = 0; i < ETH_ALEN; i++)
7126 dev->dev_addr[i] = RTL_R8(MAC0 + i); 7126 dev->dev_addr[i] = RTL_R8(MAC0 + i);
7127 7127
7128 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); 7128 dev->ethtool_ops = &rtl8169_ethtool_ops;
7129 dev->watchdog_timeo = RTL8169_TX_TIMEOUT; 7129 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
7130 7130
7131 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 7131 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a9509ccd33b..7622213beef1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
307}; 307};
308 308
309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { 309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310 [EDMR] = 0x0000,
311 [EDTRR] = 0x0004,
312 [EDRRR] = 0x0008,
313 [TDLAR] = 0x000c,
314 [RDLAR] = 0x0010,
315 [EESR] = 0x0014,
316 [EESIPR] = 0x0018,
317 [TRSCER] = 0x001c,
318 [RMFCR] = 0x0020,
319 [TFTR] = 0x0024,
320 [FDR] = 0x0028,
321 [RMCR] = 0x002c,
322 [EDOCR] = 0x0030,
323 [FCFTR] = 0x0034,
324 [RPADIR] = 0x0038,
325 [TRIMD] = 0x003c,
326 [RBWAR] = 0x0040,
327 [RDFAR] = 0x0044,
328 [TBRAR] = 0x004c,
329 [TDFAR] = 0x0050,
330
310 [ECMR] = 0x0160, 331 [ECMR] = 0x0160,
311 [ECSR] = 0x0164, 332 [ECSR] = 0x0164,
312 [ECSIPR] = 0x0168, 333 [ECSIPR] = 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
546 .register_type = SH_ETH_REG_FAST_SH4, 567 .register_type = SH_ETH_REG_FAST_SH4,
547 568
548 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 569 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
549 .rmcr_value = RMCR_RNC,
550 570
551 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 571 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
552 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 572 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
624 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 644 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
625 EESR_TDE | EESR_ECI, 645 EESR_TDE | EESR_ECI,
626 .fdr_value = 0x0000072f, 646 .fdr_value = 0x0000072f,
627 .rmcr_value = RMCR_RNC,
628 647
629 .irq_flags = IRQF_SHARED, 648 .irq_flags = IRQF_SHARED,
630 .apr = 1, 649 .apr = 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
752 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 771 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
753 EESR_TDE | EESR_ECI, 772 EESR_TDE | EESR_ECI,
754 .fdr_value = 0x0000070f, 773 .fdr_value = 0x0000070f,
755 .rmcr_value = RMCR_RNC,
756 774
757 .apr = 1, 775 .apr = 1,
758 .mpr = 1, 776 .mpr = 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
784 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 802 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
785 EESR_TDE | EESR_ECI, 803 EESR_TDE | EESR_ECI,
786 .fdr_value = 0x0000070f, 804 .fdr_value = 0x0000070f,
787 .rmcr_value = RMCR_RNC,
788 805
789 .no_psr = 1, 806 .no_psr = 1,
790 .apr = 1, 807 .apr = 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
833 if (!cd->fdr_value) 850 if (!cd->fdr_value)
834 cd->fdr_value = DEFAULT_FDR_INIT; 851 cd->fdr_value = DEFAULT_FDR_INIT;
835 852
836 if (!cd->rmcr_value)
837 cd->rmcr_value = DEFAULT_RMCR_VALUE;
838
839 if (!cd->tx_check) 853 if (!cd->tx_check)
840 cd->tx_check = DEFAULT_TX_CHECK; 854 cd->tx_check = DEFAULT_TX_CHECK;
841 855
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1287 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 1301 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1288 sh_eth_write(ndev, 0, TFTR); 1302 sh_eth_write(ndev, 0, TFTR);
1289 1303
1290 /* Frame recv control */ 1304 /* Frame recv control (enable multiple-packets per rx irq) */
1291 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 1305 sh_eth_write(ndev, RMCR_RNC, RMCR);
1292 1306
1293 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1307 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1294 1308
@@ -1385,7 +1399,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1385 int entry = mdp->cur_rx % mdp->num_rx_ring; 1399 int entry = mdp->cur_rx % mdp->num_rx_ring;
1386 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; 1400 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1387 struct sk_buff *skb; 1401 struct sk_buff *skb;
1388 int exceeded = 0;
1389 u16 pkt_len = 0; 1402 u16 pkt_len = 0;
1390 u32 desc_status; 1403 u32 desc_status;
1391 1404
@@ -1397,10 +1410,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1397 if (--boguscnt < 0) 1410 if (--boguscnt < 0)
1398 break; 1411 break;
1399 1412
1400 if (*quota <= 0) { 1413 if (*quota <= 0)
1401 exceeded = 1;
1402 break; 1414 break;
1403 } 1415
1404 (*quota)--; 1416 (*quota)--;
1405 1417
1406 if (!(desc_status & RDFEND)) 1418 if (!(desc_status & RDFEND))
@@ -1448,7 +1460,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1448 ndev->stats.rx_packets++; 1460 ndev->stats.rx_packets++;
1449 ndev->stats.rx_bytes += pkt_len; 1461 ndev->stats.rx_bytes += pkt_len;
1450 } 1462 }
1451 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1452 entry = (++mdp->cur_rx) % mdp->num_rx_ring; 1463 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1453 rxdesc = &mdp->rx_ring[entry]; 1464 rxdesc = &mdp->rx_ring[entry];
1454 } 1465 }
@@ -1494,7 +1505,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1494 sh_eth_write(ndev, EDRRR_R, EDRRR); 1505 sh_eth_write(ndev, EDRRR_R, EDRRR);
1495 } 1506 }
1496 1507
1497 return exceeded; 1508 return *quota <= 0;
1498} 1509}
1499 1510
1500static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1511static void sh_eth_rcv_snd_disable(struct net_device *ndev)
@@ -2627,8 +2638,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
2627 pdev->name, pdev->id); 2638 pdev->name, pdev->id);
2628 2639
2629 /* PHY IRQ */ 2640 /* PHY IRQ */
2630 mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR, 2641 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2631 GFP_KERNEL); 2642 GFP_KERNEL);
2632 if (!mdp->mii_bus->irq) { 2643 if (!mdp->mii_bus->irq) {
2633 ret = -ENOMEM; 2644 ret = -ENOMEM;
2634 goto out_free_bus; 2645 goto out_free_bus;
@@ -2843,7 +2854,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2843 ndev->netdev_ops = &sh_eth_netdev_ops_tsu; 2854 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2844 else 2855 else
2845 ndev->netdev_ops = &sh_eth_netdev_ops; 2856 ndev->netdev_ops = &sh_eth_netdev_ops;
2846 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 2857 ndev->ethtool_ops = &sh_eth_ethtool_ops;
2847 ndev->watchdog_timeo = TX_TIMEOUT; 2858 ndev->watchdog_timeo = TX_TIMEOUT;
2848 2859
2849 /* debug message level */ 2860 /* debug message level */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37cd5fec..b37c427144ee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
319enum RMCR_BIT { 319enum RMCR_BIT {
320 RMCR_RNC = 0x00000001, 320 RMCR_RNC = 0x00000001,
321}; 321};
322#define DEFAULT_RMCR_VALUE 0x00000000
323 322
324/* ECMR */ 323/* ECMR */
325enum FELIC_MODE_BIT { 324enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
466 unsigned long fdr_value; 465 unsigned long fdr_value;
467 unsigned long fcftr_value; 466 unsigned long fcftr_value;
468 unsigned long rpadir_value; 467 unsigned long rpadir_value;
469 unsigned long rmcr_value;
470 468
471 /* interrupt checking mask */ 469 /* interrupt checking mask */
472 unsigned long tx_check; 470 unsigned long tx_check;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 0415fa50eeb7..c0981ae45874 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
520 520
521void sxgbe_set_ethtool_ops(struct net_device *netdev) 521void sxgbe_set_ethtool_ops(struct net_device *netdev)
522{ 522{
523 SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops); 523 netdev->ethtool_ops = &sxgbe_ethtool_ops;
524} 524}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 82a9a983869f..698494481d18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -425,8 +425,8 @@ dmamem_err:
425 * @rx_rsize: ring size 425 * @rx_rsize: ring size
426 * Description: this function initializes the DMA RX descriptor 426 * Description: this function initializes the DMA RX descriptor
427 */ 427 */
428void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring, 428static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
429 int rx_rsize) 429 int rx_rsize)
430{ 430{
431 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 431 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
432 rx_ring->dma_rx, rx_ring->dma_rx_phy); 432 rx_ring->dma_rx, rx_ring->dma_rx_phy);
@@ -519,8 +519,8 @@ error:
519 * @tx_rsize: ring size 519 * @tx_rsize: ring size
520 * Description: this function initializes the DMA TX descriptor 520 * Description: this function initializes the DMA TX descriptor
521 */ 521 */
522void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring, 522static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
523 int tx_rsize) 523 int tx_rsize)
524{ 524{
525 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 525 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
526 tx_ring->dma_tx, tx_ring->dma_tx_phy); 526 tx_ring->dma_tx, tx_ring->dma_tx_phy);
@@ -1221,11 +1221,10 @@ static int sxgbe_release(struct net_device *dev)
1221 1221
1222 return 0; 1222 return 0;
1223} 1223}
1224
1225/* Prepare first Tx descriptor for doing TSO operation */ 1224/* Prepare first Tx descriptor for doing TSO operation */
1226void sxgbe_tso_prepare(struct sxgbe_priv_data *priv, 1225static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
1227 struct sxgbe_tx_norm_desc *first_desc, 1226 struct sxgbe_tx_norm_desc *first_desc,
1228 struct sk_buff *skb) 1227 struct sk_buff *skb)
1229{ 1228{
1230 unsigned int total_hdr_len, tcp_hdr_len; 1229 unsigned int total_hdr_len, tcp_hdr_len;
1231 1230
@@ -1914,40 +1913,6 @@ static void sxgbe_set_rx_mode(struct net_device *dev)
1914 readl(ioaddr + SXGBE_HASH_LOW)); 1913 readl(ioaddr + SXGBE_HASH_LOW));
1915} 1914}
1916 1915
1917/**
1918 * sxgbe_config - entry point for changing configuration mode passed on by
1919 * ifconfig
1920 * @dev : pointer to the device structure
1921 * @map : pointer to the device mapping structure
1922 * Description:
1923 * This function is a driver entry point which gets called by the kernel
1924 * whenever some device configuration is changed.
1925 * Return value:
1926 * This function returns 0 if success and appropriate error otherwise.
1927 */
1928static int sxgbe_config(struct net_device *dev, struct ifmap *map)
1929{
1930 struct sxgbe_priv_data *priv = netdev_priv(dev);
1931
1932 /* Can't act on a running interface */
1933 if (dev->flags & IFF_UP)
1934 return -EBUSY;
1935
1936 /* Don't allow changing the I/O address */
1937 if (map->base_addr != (unsigned long)priv->ioaddr) {
1938 netdev_warn(dev, "can't change I/O address\n");
1939 return -EOPNOTSUPP;
1940 }
1941
1942 /* Don't allow changing the IRQ */
1943 if (map->irq != priv->irq) {
1944 netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
1945 return -EOPNOTSUPP;
1946 }
1947
1948 return 0;
1949}
1950
1951#ifdef CONFIG_NET_POLL_CONTROLLER 1916#ifdef CONFIG_NET_POLL_CONTROLLER
1952/** 1917/**
1953 * sxgbe_poll_controller - entry point for polling receive by device 1918 * sxgbe_poll_controller - entry point for polling receive by device
@@ -2009,7 +1974,6 @@ static const struct net_device_ops sxgbe_netdev_ops = {
2009 .ndo_set_rx_mode = sxgbe_set_rx_mode, 1974 .ndo_set_rx_mode = sxgbe_set_rx_mode,
2010 .ndo_tx_timeout = sxgbe_tx_timeout, 1975 .ndo_tx_timeout = sxgbe_tx_timeout,
2011 .ndo_do_ioctl = sxgbe_ioctl, 1976 .ndo_do_ioctl = sxgbe_ioctl,
2012 .ndo_set_config = sxgbe_config,
2013#ifdef CONFIG_NET_POLL_CONTROLLER 1977#ifdef CONFIG_NET_POLL_CONTROLLER
2014 .ndo_poll_controller = sxgbe_poll_controller, 1978 .ndo_poll_controller = sxgbe_poll_controller,
2015#endif 1979#endif
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 56f8bf5a3f1b..81437d91df99 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -188,7 +188,6 @@
188 188
189/* L3/L4 function registers */ 189/* L3/L4 function registers */
190#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00 190#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
191#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
192#define SXGBE_CORE_L34_DATA_REG 0x0C04 191#define SXGBE_CORE_L34_DATA_REG 0x0C04
193 192
194/* ARP registers */ 193/* ARP registers */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 63d595fd3cc5..1e274045970f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
2248 } else { 2248 } else {
2249 net_dev->netdev_ops = &efx_farch_netdev_ops; 2249 net_dev->netdev_ops = &efx_farch_netdev_ops;
2250 } 2250 }
2251 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2251 net_dev->ethtool_ops = &efx_ethtool_ops;
2252 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2252 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2253 2253
2254 rtnl_lock(); 2254 rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 0de8b07c24c2..74739c4b9997 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1033,7 +1033,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
1033 0 : ARRAY_SIZE(efx->rx_indir_table)); 1033 0 : ARRAY_SIZE(efx->rx_indir_table));
1034} 1034}
1035 1035
1036static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) 1036static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
1037{ 1037{
1038 struct efx_nic *efx = netdev_priv(net_dev); 1038 struct efx_nic *efx = netdev_priv(net_dev);
1039 1039
@@ -1041,8 +1041,8 @@ static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
1041 return 0; 1041 return 0;
1042} 1042}
1043 1043
1044static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, 1044static int efx_ethtool_set_rxfh(struct net_device *net_dev,
1045 const u32 *indir) 1045 const u32 *indir, const u8 *key)
1046{ 1046{
1047 struct efx_nic *efx = netdev_priv(net_dev); 1047 struct efx_nic *efx = netdev_priv(net_dev);
1048 1048
@@ -1125,8 +1125,8 @@ const struct ethtool_ops efx_ethtool_ops = {
1125 .get_rxnfc = efx_ethtool_get_rxnfc, 1125 .get_rxnfc = efx_ethtool_get_rxnfc,
1126 .set_rxnfc = efx_ethtool_set_rxnfc, 1126 .set_rxnfc = efx_ethtool_set_rxnfc,
1127 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1127 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
1128 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1128 .get_rxfh = efx_ethtool_get_rxfh,
1129 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1129 .set_rxfh = efx_ethtool_set_rxfh,
1130 .get_ts_info = efx_ethtool_get_ts_info, 1130 .get_ts_info = efx_ethtool_get_ts_info,
1131 .get_module_info = efx_ethtool_get_module_info, 1131 .get_module_info = efx_ethtool_get_module_info,
1132 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1132 .get_module_eeprom = efx_ethtool_get_module_eeprom,
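
The renamed hooks carry the RSS hash key alongside the indirection table,
matching the ethtool API of this merge window. A sketch of the new
signatures (function names hypothetical; a driver without a programmable
key, like sfc here, can simply ignore the key argument):

	#include <linux/netdevice.h>

	static int example_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
	{
		/* fill indir[]; key is left untouched when unsupported */
		return 0;
	}

	static int example_set_rxfh(struct net_device *dev,
				    const u32 *indir, const u8 *key)
	{
		/* either argument may be NULL when userspace updates
		 * only the other one */
		return 0;
	}
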
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4d3f119b67b3..afb94aa2c15e 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,10 +66,17 @@
66#define EFX_USE_QWORD_IO 1 66#define EFX_USE_QWORD_IO 1
67#endif 67#endif
68 68
69/* Hardware issue requires that only 64-bit naturally aligned writes
 70 * are seen by hardware. It's not strictly necessary to restrict to
 71 * x86_64 arch, but done for safety since unusual write-combining behaviour
72 * can break PIO.
73 */
74#ifdef CONFIG_X86_64
69/* PIO is a win only if write-combining is possible */ 75/* PIO is a win only if write-combining is possible */
70#ifdef ARCH_HAS_IOREMAP_WC 76#ifdef ARCH_HAS_IOREMAP_WC
71#define EFX_USE_PIO 1 77#define EFX_USE_PIO 1
72#endif 78#endif
79#endif
73 80
74#ifdef EFX_USE_QWORD_IO 81#ifdef EFX_USE_QWORD_IO
75static inline void _efx_writeq(struct efx_nic *efx, __le64 value, 82static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9a9205e77896..43d2e64546ed 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
1633 1633
1634 ivi->vf = vf_i; 1634 ivi->vf = vf_i;
1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr); 1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr);
1636 ivi->tx_rate = 0; 1636 ivi->max_tx_rate = 0;
1637 ivi->min_tx_rate = 0;
1637 tci = ntohs(vf->addr.tci); 1638 tci = ntohs(vf->addr.tci);
1638 ivi->vlan = tci & VLAN_VID_MASK; 1639 ivi->vlan = tci & VLAN_VID_MASK;
1639 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; 1640 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fa9475300411..ede8dcca0ff3 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
189 u8 buf[L1_CACHE_BYTES]; 189 u8 buf[L1_CACHE_BYTES];
190}; 190};
191 191
192/* Copy in explicit 64-bit writes. */
193static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
194{
195 u64 *src64 = src;
196 u64 __iomem *dest64 = dest;
197 size_t l64 = len / 8;
198 size_t i;
199
200 for (i = 0; i < l64; i++)
201 writeq(src64[i], &dest64[i]);
202}
203
192/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. 204/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
193 * Advances piobuf pointer. Leaves additional data in the copy buffer. 205 * Advances piobuf pointer. Leaves additional data in the copy buffer.
194 */ 206 */
@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
198{ 210{
199 int block_len = len & ~(sizeof(copy_buf->buf) - 1); 211 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
200 212
201 memcpy_toio(*piobuf, data, block_len); 213 efx_memcpy_64(*piobuf, data, block_len);
202 *piobuf += block_len; 214 *piobuf += block_len;
203 len -= block_len; 215 len -= block_len;
204 216
@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
230 if (copy_buf->used < sizeof(copy_buf->buf)) 242 if (copy_buf->used < sizeof(copy_buf->buf))
231 return; 243 return;
232 244
233 memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 245 efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
234 *piobuf += sizeof(copy_buf->buf); 246 *piobuf += sizeof(copy_buf->buf);
235 data += copy_to_buf; 247 data += copy_to_buf;
236 len -= copy_to_buf; 248 len -= copy_to_buf;
@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
245{ 257{
246 /* if there's anything in it, write the whole buffer, including junk */ 258 /* if there's anything in it, write the whole buffer, including junk */
247 if (copy_buf->used) 259 if (copy_buf->used)
248 memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 260 efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
249} 261}
250 262
251/* Traverse skb structure and copy fragments in to PIO buffer. 263/* Traverse skb structure and copy fragments in to PIO buffer.
@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
304 */ 316 */
305 BUILD_BUG_ON(L1_CACHE_BYTES > 317 BUILD_BUG_ON(L1_CACHE_BYTES >
306 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 318 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
307 memcpy_toio(tx_queue->piobuf, skb->data, 319 efx_memcpy_64(tx_queue->piobuf, skb->data,
308 ALIGN(skb->len, L1_CACHE_BYTES)); 320 ALIGN(skb->len, L1_CACHE_BYTES));
309 } 321 }
310 322
311 EFX_POPULATE_QWORD_5(buffer->option, 323 EFX_POPULATE_QWORD_5(buffer->option,
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index acbbe48a519c..a86339903b9b 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
1877 1877
1878 dev->netdev_ops = &sis190_netdev_ops; 1878 dev->netdev_ops = &sis190_netdev_ops;
1879 1879
1880 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); 1880 dev->ethtool_ops = &sis190_ethtool_ops;
1881 dev->watchdog_timeo = SIS190_TX_TIMEOUT; 1881 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1882 1882
1883 spin_lock_init(&tp->lock); 1883 spin_lock_init(&tp->lock);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index c7a4868571f9..6b33127ab352 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
318 318
319 /* The SMC91c92-specific entries in the device structure. */ 319 /* The SMC91c92-specific entries in the device structure. */
320 dev->netdev_ops = &smc_netdev_ops; 320 dev->netdev_ops = &smc_netdev_ops;
321 SET_ETHTOOL_OPS(dev, &ethtool_ops); 321 dev->ethtool_ops = &ethtool_ops;
322 dev->watchdog_timeo = TX_TIMEOUT; 322 dev->watchdog_timeo = TX_TIMEOUT;
323 323
324 smc->mii_if.dev = dev; 324 smc->mii_if.dev = dev;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151da40d..5e13fa5524ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2477 goto out_disable_resources; 2477 goto out_disable_resources;
2478 } 2478 }
2479 2479
2480 netif_carrier_off(dev);
2481
2480 retval = register_netdev(dev); 2482 retval = register_netdev(dev);
2481 if (retval) { 2483 if (retval) {
2482 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2484 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5f9cb85c8ef..c62e67f3c2f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -322,9 +322,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
322 return -EBUSY; 322 return -EBUSY;
323 } 323 }
324 cmd->transceiver = XCVR_INTERNAL; 324 cmd->transceiver = XCVR_INTERNAL;
325 spin_lock_irq(&priv->lock);
326 rc = phy_ethtool_gset(phy, cmd); 325 rc = phy_ethtool_gset(phy, cmd);
327 spin_unlock_irq(&priv->lock);
328 return rc; 326 return rc;
329} 327}
330 328
@@ -431,8 +429,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
431 if (priv->pcs) /* FIXME */ 429 if (priv->pcs) /* FIXME */
432 return; 430 return;
433 431
434 spin_lock(&priv->lock);
435
436 pause->rx_pause = 0; 432 pause->rx_pause = 0;
437 pause->tx_pause = 0; 433 pause->tx_pause = 0;
438 pause->autoneg = priv->phydev->autoneg; 434 pause->autoneg = priv->phydev->autoneg;
@@ -442,7 +438,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
442 if (priv->flow_ctrl & FLOW_TX) 438 if (priv->flow_ctrl & FLOW_TX)
443 pause->tx_pause = 1; 439 pause->tx_pause = 1;
444 440
445 spin_unlock(&priv->lock);
446} 441}
447 442
448static int 443static int
@@ -457,8 +452,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
457 if (priv->pcs) /* FIXME */ 452 if (priv->pcs) /* FIXME */
458 return -EOPNOTSUPP; 453 return -EOPNOTSUPP;
459 454
460 spin_lock(&priv->lock);
461
462 if (pause->rx_pause) 455 if (pause->rx_pause)
463 new_pause |= FLOW_RX; 456 new_pause |= FLOW_RX;
464 if (pause->tx_pause) 457 if (pause->tx_pause)
@@ -473,7 +466,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
473 } else 466 } else
474 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex, 467 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
475 priv->flow_ctrl, priv->pause); 468 priv->flow_ctrl, priv->pause);
476 spin_unlock(&priv->lock);
477 return ret; 469 return ret;
478} 470}
479 471
@@ -784,5 +776,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
784 776
785void stmmac_set_ethtool_ops(struct net_device *netdev) 777void stmmac_set_ethtool_ops(struct net_device *netdev)
786{ 778{
787 SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops); 779 netdev->ethtool_ops = &stmmac_ethtool_ops;
788} 780}
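
The dropped spinlocks above are more than cleanup: phy_ethtool_gset() and
the pause helpers reach MDIO accessors that take a mutex and may sleep,
which is not allowed under a spinlock. A sketch of the constraint (the
wrapper name is hypothetical):

	#include <linux/ethtool.h>
	#include <linux/kernel.h>
	#include <linux/phy.h>

	static int example_gset(struct phy_device *phy, struct ethtool_cmd *cmd)
	{
		might_sleep();	/* MDIO access can block */
		return phy_ethtool_gset(phy, cmd);
	}
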
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0f4841d2e8dc..057a1208e594 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1753,7 +1753,7 @@ static int stmmac_open(struct net_device *dev)
1753 } 1753 }
1754 1754
1755 /* Request the IRQ lines */ 1755 /* Request the IRQ lines */
1756 if (priv->lpi_irq != -ENXIO) { 1756 if (priv->lpi_irq > 0) {
1757 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, 1757 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1758 dev->name, dev); 1758 dev->name, dev);
1759 if (unlikely(ret < 0)) { 1759 if (unlikely(ret < 0)) {
@@ -1813,7 +1813,7 @@ static int stmmac_release(struct net_device *dev)
1813 free_irq(dev->irq, dev); 1813 free_irq(dev->irq, dev);
1814 if (priv->wol_irq != dev->irq) 1814 if (priv->wol_irq != dev->irq)
1815 free_irq(priv->wol_irq, dev); 1815 free_irq(priv->wol_irq, dev);
1816 if (priv->lpi_irq != -ENXIO) 1816 if (priv->lpi_irq > 0)
1817 free_irq(priv->lpi_irq, dev); 1817 free_irq(priv->lpi_irq, dev);
1818 1818
1819 /* Stop TX/RX DMA and clear the descriptors */ 1819 /* Stop TX/RX DMA and clear the descriptors */
@@ -2212,27 +2212,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
2212 stmmac_tx_err(priv); 2212 stmmac_tx_err(priv);
2213} 2213}
2214 2214
2215/* Configuration changes (passed on by ifconfig) */
2216static int stmmac_config(struct net_device *dev, struct ifmap *map)
2217{
2218 if (dev->flags & IFF_UP) /* can't act on a running interface */
2219 return -EBUSY;
2220
2221 /* Don't allow changing the I/O address */
2222 if (map->base_addr != dev->base_addr) {
2223 pr_warn("%s: can't change I/O address\n", dev->name);
2224 return -EOPNOTSUPP;
2225 }
2226
2227 /* Don't allow changing the IRQ */
2228 if (map->irq != dev->irq) {
2229 pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
2230 return -EOPNOTSUPP;
2231 }
2232
2233 return 0;
2234}
2235
2236/** 2215/**
2237 * stmmac_set_rx_mode - entry point for multicast addressing 2216 * stmmac_set_rx_mode - entry point for multicast addressing
2238 * @dev : pointer to the device structure 2217 * @dev : pointer to the device structure
@@ -2598,7 +2577,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
2598 .ndo_set_rx_mode = stmmac_set_rx_mode, 2577 .ndo_set_rx_mode = stmmac_set_rx_mode,
2599 .ndo_tx_timeout = stmmac_tx_timeout, 2578 .ndo_tx_timeout = stmmac_tx_timeout,
2600 .ndo_do_ioctl = stmmac_ioctl, 2579 .ndo_do_ioctl = stmmac_ioctl,
2601 .ndo_set_config = stmmac_config,
2602#ifdef CONFIG_NET_POLL_CONTROLLER 2580#ifdef CONFIG_NET_POLL_CONTROLLER
2603 .ndo_poll_controller = stmmac_poll_controller, 2581 .ndo_poll_controller = stmmac_poll_controller,
2604#endif 2582#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a468eb107823..a5b1e1b776fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
205 if (new_bus == NULL) 205 if (new_bus == NULL)
206 return -ENOMEM; 206 return -ENOMEM;
207 207
208 if (mdio_bus_data->irqs) 208 if (mdio_bus_data->irqs) {
209 irqlist = mdio_bus_data->irqs; 209 irqlist = mdio_bus_data->irqs;
210 else 210 } else {
211 for (addr = 0; addr < PHY_MAX_ADDR; addr++)
212 priv->mii_irq[addr] = PHY_POLL;
211 irqlist = priv->mii_irq; 213 irqlist = priv->mii_irq;
214 }
212 215
213#ifdef CONFIG_OF 216#ifdef CONFIG_OF
214 if (priv->device->of_node) 217 if (priv->device->of_node)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 46aef5108bea..ea7a65be1f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -237,10 +237,12 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
237 237
238 /* Get the MAC information */ 238 /* Get the MAC information */
239 priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); 239 priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
240 if (priv->dev->irq == -ENXIO) { 240 if (priv->dev->irq < 0) {
241 pr_err("%s: ERROR: MAC IRQ configuration " 241 if (priv->dev->irq != -EPROBE_DEFER) {
242 "information not found\n", __func__); 242 netdev_err(priv->dev,
243 return -ENXIO; 243 "MAC IRQ configuration information not found\n");
244 }
245 return priv->dev->irq;
244 } 246 }
245 247
246 /* 248 /*
@@ -252,10 +254,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
252 * so the driver will continue to use the mac irq (ndev->irq) 254 * so the driver will continue to use the mac irq (ndev->irq)
253 */ 255 */
254 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); 256 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
255 if (priv->wol_irq == -ENXIO) 257 if (priv->wol_irq < 0) {
258 if (priv->wol_irq == -EPROBE_DEFER)
259 return -EPROBE_DEFER;
256 priv->wol_irq = priv->dev->irq; 260 priv->wol_irq = priv->dev->irq;
261 }
257 262
258 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); 263 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
264 if (priv->lpi_irq == -EPROBE_DEFER)
265 return -EPROBE_DEFER;
259 266
260 platform_set_drvdata(pdev, priv->dev); 267 platform_set_drvdata(pdev, priv->dev);
261 268
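
The deferral handling added here follows one idiom: any negative return from
platform_get_irq_byname() is an error, and -EPROBE_DEFER in particular must
propagate so the probe is retried once the interrupt controller is ready. A
condensed sketch of the optional-IRQ variant used for wol_irq (the helper
name is hypothetical):

	#include <linux/platform_device.h>

	static int example_irq_byname_or(struct platform_device *pdev,
					 const char *name, int fallback)
	{
		int irq = platform_get_irq_byname(pdev, name);

		if (irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry probe later */
		return irq < 0 ? fallback : irq;	/* IRQ is optional */
	}
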
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 2ead87759ab4..38da73a2a886 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
2413 .get_ethtool_stats = bdx_get_ethtool_stats, 2413 .get_ethtool_stats = bdx_get_ethtool_stats,
2414 }; 2414 };
2415 2415
2416 SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops); 2416 netdev->ethtool_ops = &bdx_ethtool_ops;
2417} 2417}
2418 2418
2419/** 2419/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 73f74f369437..7399a52f7c26 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -313,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
313 313
314static struct mii_bus *cpmac_mii; 314static struct mii_bus *cpmac_mii;
315 315
316static int cpmac_config(struct net_device *dev, struct ifmap *map)
317{
318 if (dev->flags & IFF_UP)
319 return -EBUSY;
320
321 /* Don't allow changing the I/O address */
322 if (map->base_addr != dev->base_addr)
323 return -EOPNOTSUPP;
324
325 /* ignore other fields */
326 return 0;
327}
328
329static void cpmac_set_multicast_list(struct net_device *dev) 316static void cpmac_set_multicast_list(struct net_device *dev)
330{ 317{
331 struct netdev_hw_addr *ha; 318 struct netdev_hw_addr *ha;
@@ -1100,7 +1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
1100 .ndo_tx_timeout = cpmac_tx_timeout, 1087 .ndo_tx_timeout = cpmac_tx_timeout,
1101 .ndo_set_rx_mode = cpmac_set_multicast_list, 1088 .ndo_set_rx_mode = cpmac_set_multicast_list,
1102 .ndo_do_ioctl = cpmac_ioctl, 1089 .ndo_do_ioctl = cpmac_ioctl,
1103 .ndo_set_config = cpmac_config,
1104 .ndo_change_mtu = eth_change_mtu, 1090 .ndo_change_mtu = eth_change_mtu,
1105 .ndo_validate_addr = eth_validate_addr, 1091 .ndo_validate_addr = eth_validate_addr,
1106 .ndo_set_mac_address = eth_mac_addr, 1092 .ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 148da9ae8366..aa8bf45e53dc 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -29,6 +29,8 @@
29#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) 29#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
30#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) 30#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
31 31
32#define GMII_SEL_MODE_MASK 0x3
33
32struct cpsw_phy_sel_priv { 34struct cpsw_phy_sel_priv {
33 struct device *dev; 35 struct device *dev;
34 u32 __iomem *gmii_sel; 36 u32 __iomem *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
65 break; 67 break;
66 }; 68 };
67 69
68 mask = 0x3 << (slave * 2) | BIT(slave + 6); 70 mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
69 mode <<= slave * 2; 71 mode <<= slave * 2;
70 72
71 if (priv->rmii_clock_external) { 73 if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
81 writel(reg, priv->gmii_sel); 83 writel(reg, priv->gmii_sel);
82} 84}
83 85
86static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
87 phy_interface_t phy_mode, int slave)
88{
89 u32 reg;
90 u32 mask;
91 u32 mode = 0;
92
93 reg = readl(priv->gmii_sel);
94
95 switch (phy_mode) {
96 case PHY_INTERFACE_MODE_RMII:
97 mode = AM33XX_GMII_SEL_MODE_RMII;
98 break;
99
100 case PHY_INTERFACE_MODE_RGMII:
101 case PHY_INTERFACE_MODE_RGMII_ID:
102 case PHY_INTERFACE_MODE_RGMII_RXID:
103 case PHY_INTERFACE_MODE_RGMII_TXID:
104 mode = AM33XX_GMII_SEL_MODE_RGMII;
105 break;
106
107 case PHY_INTERFACE_MODE_MII:
108 default:
109 mode = AM33XX_GMII_SEL_MODE_MII;
110 break;
111 };
112
113 switch (slave) {
114 case 0:
115 mask = GMII_SEL_MODE_MASK;
116 break;
117 case 1:
118 mask = GMII_SEL_MODE_MASK << 4;
119 mode <<= 4;
120 break;
121 default:
122 dev_err(priv->dev, "invalid slave number...\n");
123 return;
124 }
125
126 if (priv->rmii_clock_external)
127 dev_err(priv->dev, "RMII External clock is not supported\n");
128
129 reg &= ~mask;
130 reg |= mode;
131
132 writel(reg, priv->gmii_sel);
133}
134
84static struct platform_driver cpsw_phy_sel_driver; 135static struct platform_driver cpsw_phy_sel_driver;
85static int match(struct device *dev, void *data) 136static int match(struct device *dev, void *data)
86{ 137{
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
112 .compatible = "ti,am3352-cpsw-phy-sel", 163 .compatible = "ti,am3352-cpsw-phy-sel",
113 .data = &cpsw_gmii_sel_am3352, 164 .data = &cpsw_gmii_sel_am3352,
114 }, 165 },
166 {
167 .compatible = "ti,dra7xx-cpsw-phy-sel",
168 .data = &cpsw_gmii_sel_dra7xx,
169 },
170 {
171 .compatible = "ti,am43xx-cpsw-phy-sel",
172 .data = &cpsw_gmii_sel_am3352,
173 },
115 {} 174 {}
116}; 175};
117MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table); 176MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
132 return -ENOMEM; 191 return -ENOMEM;
133 } 192 }
134 193
194 priv->dev = &pdev->dev;
135 priv->cpsw_phy_sel = of_id->data; 195 priv->cpsw_phy_sel = of_id->data;
136 196
137 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel"); 197 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
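
Each new compatible entry points .data at a SoC-specific selector, so the
probe path dispatches without branching on the compatible string. A minimal
sketch of that lookup (names are illustrative, not the driver's):

	#include <linux/of_device.h>
	#include <linux/phy.h>
	#include <linux/platform_device.h>

	typedef void (*gmii_sel_fn)(struct cpsw_phy_sel_priv *priv,
				    phy_interface_t phy_mode, int slave);

	static gmii_sel_fn example_lookup_sel(struct platform_device *pdev)
	{
		const struct of_device_id *of_id =
			of_match_device(cpsw_phy_sel_id_table, &pdev->dev);

		/* .data holds the per-SoC gmii_sel programming routine */
		return of_id ? (gmii_sel_fn)of_id->data : NULL;
	}
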
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c331b7ebc812..ff380dac6629 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -143,13 +143,13 @@ do { \
143 u32 i; \ 143 u32 i; \
144 for (i = 0; i < priv->num_irqs; i++) \ 144 for (i = 0; i < priv->num_irqs; i++) \
145 enable_irq(priv->irqs_table[i]); \ 145 enable_irq(priv->irqs_table[i]); \
146 } while (0); 146 } while (0)
147#define cpsw_disable_irq(priv) \ 147#define cpsw_disable_irq(priv) \
148 do { \ 148 do { \
149 u32 i; \ 149 u32 i; \
150 for (i = 0; i < priv->num_irqs; i++) \ 150 for (i = 0; i < priv->num_irqs; i++) \
151 disable_irq_nosync(priv->irqs_table[i]); \ 151 disable_irq_nosync(priv->irqs_table[i]); \
152 } while (0); 152 } while (0)
153 153
154#define cpsw_slave_index(priv) \ 154#define cpsw_slave_index(priv) \
155 ((priv->data.dual_emac) ? priv->emac_port : \ 155 ((priv->data.dual_emac) ? priv->emac_port : \
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
248#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ 248#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
249#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ 249#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
250#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ 250#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
251#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */ 251#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
252#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
252#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ 253#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
253#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ 254#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
254#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ 255#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
255#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ 256#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
256#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ 257#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
257 258
258#define CTRL_TS_BITS \ 259#define CTRL_V2_TS_BITS \
259 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \ 260 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
260 TS_ANNEX_D_EN | TS_LTYPE1_EN) 261 TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
262
263#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
264#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
265#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
266
261 267
262#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN) 268#define CTRL_V3_TS_BITS \
263#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN) 269 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
264#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN) 270 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
271 TS_LTYPE1_EN)
272
273#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
274#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
275#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
265 276
266/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ 277/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
267#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ 278#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 	slave = &priv->slaves[priv->data.active_slave];
 
 	ctrl = slave_read(slave, CPSW2_CONTROL);
-	ctrl &= ~CTRL_ALL_TS_MASK;
+	switch (priv->version) {
+	case CPSW_VERSION_2:
+		ctrl &= ~CTRL_V2_ALL_TS_MASK;
 
-	if (priv->cpts->tx_enable)
-		ctrl |= CTRL_TX_TS_BITS;
+		if (priv->cpts->tx_enable)
+			ctrl |= CTRL_V2_TX_TS_BITS;
 
-	if (priv->cpts->rx_enable)
-		ctrl |= CTRL_RX_TS_BITS;
+		if (priv->cpts->rx_enable)
+			ctrl |= CTRL_V2_RX_TS_BITS;
+		break;
+	case CPSW_VERSION_3:
+	default:
+		ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+		if (priv->cpts->tx_enable)
+			ctrl |= CTRL_V3_TX_TS_BITS;
+
+		if (priv->cpts->rx_enable)
+			ctrl |= CTRL_V3_RX_TS_BITS;
+		break;
+	}
 
 	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
 
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 	struct hwtstamp_config cfg;
 
 	if (priv->version != CPSW_VERSION_1 &&
-	    priv->version != CPSW_VERSION_2)
+	    priv->version != CPSW_VERSION_2 &&
+	    priv->version != CPSW_VERSION_3)
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 		cpsw_hwtstamp_v1(priv);
 		break;
 	case CPSW_VERSION_2:
+	case CPSW_VERSION_3:
 		cpsw_hwtstamp_v2(priv);
 		break;
 	default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 	struct hwtstamp_config cfg;
 
 	if (priv->version != CPSW_VERSION_1 &&
-	    priv->version != CPSW_VERSION_2)
+	    priv->version != CPSW_VERSION_2 &&
+	    priv->version != CPSW_VERSION_3)
 		return -EOPNOTSUPP;
 
 	cfg.flags = 0;
@@ -1780,25 +1808,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		return -EINVAL;
 
 	if (of_property_read_u32(node, "slaves", &prop)) {
-		pr_err("Missing slaves property in the DT.\n");
+		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
 		return -EINVAL;
 	}
 	data->slaves = prop;
 
 	if (of_property_read_u32(node, "active_slave", &prop)) {
-		pr_err("Missing active_slave property in the DT.\n");
+		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
 		return -EINVAL;
 	}
 	data->active_slave = prop;
 
 	if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
-		pr_err("Missing cpts_clock_mult property in the DT.\n");
+		dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
 		return -EINVAL;
 	}
 	data->cpts_clock_mult = prop;
 
 	if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
-		pr_err("Missing cpts_clock_shift property in the DT.\n");
+		dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
 		return -EINVAL;
 	}
 	data->cpts_clock_shift = prop;
@@ -1810,31 +1838,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		return -ENOMEM;
 
 	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
-		pr_err("Missing cpdma_channels property in the DT.\n");
+		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
 		return -EINVAL;
 	}
 	data->channels = prop;
 
 	if (of_property_read_u32(node, "ale_entries", &prop)) {
-		pr_err("Missing ale_entries property in the DT.\n");
+		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
 		return -EINVAL;
 	}
 	data->ale_entries = prop;
 
 	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
-		pr_err("Missing bd_ram_size property in the DT.\n");
+		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
 		return -EINVAL;
 	}
 	data->bd_ram_size = prop;
 
 	if (of_property_read_u32(node, "rx_descs", &prop)) {
-		pr_err("Missing rx_descs property in the DT.\n");
+		dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
 		return -EINVAL;
 	}
 	data->rx_descs = prop;
 
 	if (of_property_read_u32(node, "mac_control", &prop)) {
-		pr_err("Missing mac_control property in the DT.\n");
+		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
 		return -EINVAL;
 	}
 	data->mac_control = prop;
@@ -1848,7 +1876,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
 	/* We do not want to force this, as in some cases may not have child */
 	if (ret)
-		pr_warn("Doesn't have any child node\n");
+		dev_warn(&pdev->dev, "Doesn't have any child node\n");
 
 	for_each_child_of_node(node, slave_node) {
 		struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,7 +1893,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
 		parp = of_get_property(slave_node, "phy_id", &lenp);
 		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
-			pr_err("Missing slave[%d] phy_id property\n", i);
+			dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
 			return -EINVAL;
 		}
 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
@@ -1885,18 +1913,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
 		slave_data->phy_if = of_get_phy_mode(slave_node);
 		if (slave_data->phy_if < 0) {
-			pr_err("Missing or malformed slave[%d] phy-mode property\n",
+			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
 			       i);
 			return slave_data->phy_if;
 		}
 
 		if (data->dual_emac) {
 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
 						 &prop)) {
-				pr_err("Missing dual_emac_res_vlan in DT.\n");
+				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
 				slave_data->dual_emac_res_vlan = i+1;
-				pr_err("Using %d as Reserved VLAN for %d slave\n",
+				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
 				       slave_data->dual_emac_res_vlan, i);
 			} else {
 				slave_data->dual_emac_res_vlan = prop;
 			}
@@ -1920,7 +1948,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
 
 	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
 	if (!ndev) {
-		pr_err("cpsw: error allocating net_device\n");
+		dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
 		return -ENOMEM;
 	}
 
@@ -1936,10 +1964,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
 	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
 		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
 		       ETH_ALEN);
-		pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+		dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
 	} else {
 		random_ether_addr(priv_sl2->mac_addr);
-		pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+		dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
 	}
 	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
 
@@ -1970,14 +1998,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	ndev->netdev_ops = &cpsw_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+	ndev->ethtool_ops = &cpsw_ethtool_ops;
 	netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
 	/* register the network device */
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 	ret = register_netdev(ndev);
 	if (ret) {
-		pr_err("cpsw: error registering net device\n");
+		dev_err(&pdev->dev, "cpsw: error registering net device\n");
 		free_netdev(ndev);
 		ret = -ENODEV;
 	}
@@ -1999,7 +2027,7 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
 	if (!ndev) {
-		pr_err("error allocating net_device\n");
+		dev_err(&pdev->dev, "error allocating net_device\n");
 		return -ENOMEM;
 	}
 
@@ -2014,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
 	priv->irq_enabled = true;
 	if (!priv->cpts) {
-		pr_err("error allocating cpts\n");
+		dev_err(&pdev->dev, "error allocating cpts\n");
 		goto clean_ndev_ret;
 	}
 
@@ -2027,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	pinctrl_pm_select_default_state(&pdev->dev);
 
 	if (cpsw_probe_dt(&priv->data, pdev)) {
-		pr_err("cpsw: platform data missing\n");
+		dev_err(&pdev->dev, "cpsw: platform data missing\n");
 		ret = -ENODEV;
 		goto clean_runtime_disable_ret;
 	}
@@ -2035,10 +2063,10 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
-		pr_info("Detected MACID = %pM\n", priv->mac_addr);
+		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
 	} else {
 		eth_random_addr(priv->mac_addr);
-		pr_info("Random MACID = %pM\n", priv->mac_addr);
+		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
 	}
 
 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2199,7 +2227,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	ndev->netdev_ops = &cpsw_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+	ndev->ethtool_ops = &cpsw_ethtool_ops;
 	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
 	/* register the network device */
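The cpsw hunks above, besides adding CPSW_VERSION_3 handling, convert bare pr_err()/pr_info()/pr_warn() calls to their device-aware dev_err()/dev_info()/dev_warn() counterparts, which prefix every message with the driver and device name. A minimal sketch of the pattern; the probe function and device name in the comment are illustrative, not part of the driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical probe illustrating dev_err() vs pr_err() */
static int example_probe(struct platform_device *pdev)
{
	u32 prop;

	if (of_property_read_u32(pdev->dev.of_node, "slaves", &prop)) {
		/* Prints e.g. "cpsw 4a100000.ethernet: Missing slaves ..." */
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}

	dev_info(&pdev->dev, "found %u slaves\n", prop);
	return 0;
}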
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 243513980b51..6b56f85951e5 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
 	schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 }
 
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
 {
-	cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+	cpts->refclk = devm_clk_get(dev, "cpts");
 	if (IS_ERR(cpts->refclk)) {
-		pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+		dev_err(dev, "Failed to get cpts refclk\n");
 		cpts->refclk = NULL;
 		return;
 	}
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
 static void cpts_clk_release(struct cpts *cpts)
 {
 	clk_disable(cpts->refclk);
-	clk_put(cpts->refclk);
 }
 
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
 	for (i = 0; i < CPTS_MAX_EVENTS; i++)
 		list_add(&cpts->pool_data[i].list, &cpts->pool);
 
-	cpts_clk_init(cpts);
+	cpts_clk_init(dev, cpts);
 	cpts_write32(cpts, CPTS_EN, control);
 	cpts_write32(cpts, TS_PEND_EN, int_enable);
 
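The cpts change above is the usual managed-clock conversion: devm_clk_get() ties the clock reference to the device, so the clk_put() in cpts_clk_release() can be dropped. A sketch of the pattern under the same assumptions (a "cpts" clock consumer name, hypothetical probe):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *refclk;

	refclk = devm_clk_get(&pdev->dev, "cpts");
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	/* No clk_put() needed on any exit path: the devres core drops
	 * the reference automatically when the device is unbound.
	 */
	return clk_prepare_enable(refclk);
}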
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 88ef27067bf2..4a000f6dd6fc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 	int bitmap_size;
 	struct cpdma_desc_pool *pool;
 
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
 	if (!pool)
-		return NULL;
+		goto fail;
 
 	spin_lock_init(&pool->lock);
 
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 	pool->num_desc = size / pool->desc_size;
 
 	bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
-	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
 	if (!pool->bitmap)
 		goto fail;
 
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 
 	if (pool->iomap)
 		return pool;
-
 fail:
-	kfree(pool->bitmap);
-	kfree(pool);
 	return NULL;
 }
 
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
 
 	spin_lock_irqsave(&pool->lock, flags);
 	WARN_ON(pool->used_desc);
-	kfree(pool->bitmap);
 	if (pool->cpumap) {
 		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
 				  pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
 		iounmap(pool->iomap);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
-	kfree(pool);
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 {
 	struct cpdma_ctlr *ctlr;
 
-	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
 	if (!ctlr)
 		return NULL;
 
@@ -290,10 +285,8 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 					    ctlr->params.desc_hw_addr,
 					    ctlr->params.desc_mem_size,
 					    ctlr->params.desc_align);
-	if (!ctlr->pool) {
-		kfree(ctlr);
+	if (!ctlr->pool)
 		return NULL;
-	}
 
 	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
 		ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -468,7 +461,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 
 	cpdma_desc_pool_destroy(ctlr->pool);
 	spin_unlock_irqrestore(&ctlr->lock, flags);
-	kfree(ctlr);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +499,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 				     cpdma_handler_fn handler)
 {
 	struct cpdma_chan *chan;
-	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
 	unsigned long flags;
 
 	if (__chan_linear(chan_num) >= ctlr->num_chan)
 		return NULL;
 
-	ret = -ENOMEM;
-	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
 	if (!chan)
-		goto err_chan_alloc;
+		return ERR_PTR(-ENOMEM);
 
 	spin_lock_irqsave(&ctlr->lock, flags);
-	ret = -EBUSY;
-	if (ctlr->channels[chan_num])
-		goto err_chan_busy;
+	if (ctlr->channels[chan_num]) {
+		spin_unlock_irqrestore(&ctlr->lock, flags);
+		devm_kfree(ctlr->dev, chan);
+		return ERR_PTR(-EBUSY);
+	}
 
 	chan->ctlr	= ctlr;
 	chan->state	= CPDMA_STATE_IDLE;
@@ -551,12 +544,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	ctlr->channels[chan_num] = chan;
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return chan;
-
-err_chan_busy:
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-	kfree(chan);
-err_chan_alloc:
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
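The davinci_cpdma rework is a devm_kzalloc() conversion: allocations become device-managed, which is what lets the error paths and destroy functions drop their kfree() calls entirely. A reduced sketch of the idea (the struct and probe here are hypothetical):

#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_pool {
	unsigned long *bitmap;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_pool *pool;

	pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	pool->bitmap = devm_kzalloc(&pdev->dev,
				    BITS_TO_LONGS(128) * sizeof(long),
				    GFP_KERNEL);
	if (!pool->bitmap)
		return -ENOMEM;	/* pool itself is freed automatically */

	return 0;
}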
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 8f0e69ce07ca..35a139e9a833 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
 	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
 					    res_num))) {
 		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
-			dev_err(emac_dev, "Request IRQ %d\n", irq_num);
 			if (request_irq(irq_num, emac_irq, 0, ndev->name,
 					ndev)) {
 				dev_err(emac_dev,
@@ -1865,7 +1864,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	struct emac_priv *priv;
 	unsigned long hw_ram_addr;
 	struct emac_platform_data *pdata;
-	struct device *emac_dev;
 	struct cpdma_params dma_params;
 	struct clk *emac_clk;
 	unsigned long emac_bus_frequency;
@@ -1911,7 +1909,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	priv->coal_intvl = 0;
 	priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
 
-	emac_dev = &ndev->dev;
 	/* Get EMAC platform data */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1927,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
 
 	memset(&dma_params, 0, sizeof(dma_params));
-	dma_params.dev			= emac_dev;
+	dma_params.dev			= &pdev->dev;
 	dma_params.dmaregs		= priv->emac_base;
 	dma_params.rxthresh		= priv->emac_base + 0x120;
 	dma_params.rxfree		= priv->emac_base + 0x140;
@@ -1980,7 +1977,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	}
 
 	ndev->netdev_ops = &emac_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+	ndev->ethtool_ops = &ethtool_ops;
 	netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
 
 	/* register the network device */
@@ -1994,7 +1991,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 
 	if (netif_msg_probe(priv)) {
-		dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+		dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
 			   "(regs: %p, irq: %d)\n",
 			   (void *)priv->emac_base_phys, ndev->irq);
 	}
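The davinci_emac change drops the emac_dev alias (&ndev->dev) in favour of &pdev->dev, most importantly for dma_params.dev: DMA mappings must be made against the bus-level platform device, not the logical net_device. A sketch of the distinction (probe and message are illustrative):

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev = alloc_etherdev(0);

	if (!ndev)
		return -ENOMEM;

	/* &pdev->dev names the bus device and is valid for DMA and
	 * logging throughout probe; &ndev->dev is only fully set up
	 * once register_netdev() has run.
	 */
	dev_notice(&pdev->dev, "probing, ndev at %p\n", ndev);

	free_netdev(ndev);
	return 0;
}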
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 0cca9dec5d82..735dc53d4b01 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
 		return -EINVAL;
 
 	if (of_property_read_u32(node, "bus_freq", &prop)) {
-		pr_err("Missing bus_freq property in the DT.\n");
+		dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
 		return -EINVAL;
 	}
 	data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	struct phy_device *phy;
 	int ret, addr;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	data->bus = mdiobus_alloc();
+	data->bus = devm_mdiobus_alloc(dev);
 	if (!data->bus) {
 		dev_err(dev, "failed to alloc mii bus\n");
-		ret = -ENOMEM;
-		goto bail_out;
+		return -ENOMEM;
 	}
 
 	if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	data->bus->parent	= dev;
 	data->bus->priv		= data;
 
-	/* Select default pin state */
-	pinctrl_pm_select_default_state(&pdev->dev);
-
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
-	data->clk = clk_get(&pdev->dev, "fck");
+	data->clk = devm_clk_get(dev, "fck");
 	if (IS_ERR(data->clk)) {
 		dev_err(dev, "failed to get device clock\n");
 		ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	spin_lock_init(&data->lock);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "could not find register map resource\n");
-		ret = -ENOENT;
-		goto bail_out;
-	}
-
-	res = devm_request_mem_region(dev, res->start, resource_size(res),
-				      dev_name(dev));
-	if (!res) {
-		dev_err(dev, "could not allocate register map resource\n");
-		ret = -ENXIO;
-		goto bail_out;
-	}
-
-	data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-	if (!data->regs) {
-		dev_err(dev, "could not map mdio registers\n");
-		ret = -ENOMEM;
+	data->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(data->regs)) {
+		ret = PTR_ERR(data->regs);
 		goto bail_out;
 	}
 
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
 	return 0;
 
 bail_out:
-	if (data->bus)
-		mdiobus_free(data->bus);
-
-	if (data->clk)
-		clk_put(data->clk);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	kfree(data);
-
 	return ret;
 }
 
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
 {
 	struct davinci_mdio_data *data = platform_get_drvdata(pdev);
 
-	if (data->bus) {
+	if (data->bus)
 		mdiobus_unregister(data->bus);
-		mdiobus_free(data->bus);
-	}
 
-	if (data->clk)
-		clk_put(data->clk);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	kfree(data);
-
 	return 0;
 }
 
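The mdio probe above collapses the find-resource, request-region, ioremap sequence into a single devm_ioremap_resource() call, which also validates the resource and logs its own errors. A sketch of the replacement pattern (hypothetical probe):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* a NULL res is handled too */

	writel(0x0, regs);	/* illustrative register write */
	return 0;
}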
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 449011b0e007..14389f841d43 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2192,7 +2192,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 {
 	int ret;
 	int i;
-	int nz_addr = 0;
 	struct net_device *dev;
 	struct tile_net_priv *priv;
 
@@ -2212,7 +2211,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 
 	/* Initialize "priv". */
 	priv = netdev_priv(dev);
-	memset(priv, 0, sizeof(*priv));
 	priv->dev = dev;
 	priv->channel = -1;
 	priv->loopify_channel = -1;
@@ -2223,15 +2221,10 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 	 * be done before the device is opened.  If the MAC is all zeroes,
 	 * we use a random address, since we're probably on the simulator.
 	 */
-	for (i = 0; i < 6; i++)
-		nz_addr |= mac[i];
-
-	if (nz_addr) {
-		memcpy(dev->dev_addr, mac, ETH_ALEN);
-		dev->addr_len = 6;
-	} else {
+	if (!is_zero_ether_addr(mac))
+		ether_addr_copy(dev->dev_addr, mac);
+	else
 		eth_hw_addr_random(dev);
-	}
 
 	/* Register the network device. */
 	ret = register_netdev(dev);
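The tilegx cleanup replaces the hand-rolled zero-MAC test and memcpy() with the etherdevice helpers used elsewhere in the tree. The equivalent helper-based logic, as a standalone sketch:

#include <linux/etherdevice.h>

static void example_set_mac(struct net_device *dev, const u8 *mac)
{
	if (!is_zero_ether_addr(mac))
		ether_addr_copy(dev->dev_addr, mac);	/* fixed 6-byte copy */
	else
		eth_hw_addr_random(dev);	/* also sets addr_assign_type */
}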
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d899d0072ae0..bb7992804664 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1561,7 +1561,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
 	 * alloc netdev
 	 */
 	*netdev = alloc_etherdev(sizeof(struct gelic_port));
-	if (!netdev) {
+	if (!*netdev) {
 		kfree(card->unalign);
 		return NULL;
 	}
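The one-line gelic fix above is worth spelling out: the function allocates through a struct net_device **netdev out-parameter, so the NULL check must dereference it; testing the argument itself can never fail. Reduced to its essentials:

#include <linux/etherdevice.h>

static int example_alloc(struct net_device **netdev)
{
	*netdev = alloc_etherdev(0);
	if (!*netdev)	/* "if (!netdev)" would test the caller's pointer */
		return -ENOMEM;
	return 0;
}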
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 8a049a2b4474..f66ddaee0c87 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
 
 config VIA_RHINE
 	tristate "VIA Rhine support"
-	depends on PCI
+	depends on (PCI || USE_OF)
 	select CRC32
 	select MII
 	---help---
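With the dependency relaxed to (PCI || USE_OF), via-rhine below can register on two buses. A schematic of the dual registration this implies; the driver names and empty descriptors are hypothetical placeholders, not the driver's actual tables:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

static struct pci_driver example_pci_driver = {
	.name = "example-pci",
};

static struct platform_driver example_platform_driver = {
	.driver = { .name = "example-of" },
};

static int __init example_init(void)
{
	int ret = pci_register_driver(&example_pci_driver);

	if (ret)
		return ret;

	ret = platform_driver_register(&example_platform_driver);
	if (ret)
		pci_unregister_driver(&example_pci_driver);

	return ret;
}
module_init(example_init);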
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f61dc2b72bb2..2d72f96a9e2c 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
 static const char version[] =
 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
 
-/* This driver was written to use PCI memory space. Some early versions
-   of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
 	rqRhineI	= 0x0100,	/* See comment below */
+	rqIntPHY	= 0x0200,	/* Integrated PHY */
+	rqMgmt		= 0x0400,	/* Management adapter */
+	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
+					 * switched from PIO mode to MMIO
+					 * (only applies to PCI)
+					 */
 };
 /*
  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+	{ }	/* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 
 /* Offsets to the device registers. */
 enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
 	BCR1_MED1=0x80,		/* for VT6102 */
 };
 
-#ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
 	0
 };
-#endif
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
 	unsigned char *tx_bufs;
 	dma_addr_t tx_bufs_dma;
 
-	struct pci_dev *pdev;
+	int irq;
 	long pioaddr;
 	struct net_device *dev;
 	struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
 		   "failed" : "succeeded");
 }
 
-#ifdef USE_MMIO
 static void enable_mmio(long pioaddr, u32 quirks)
 {
 	int n;
-	if (quirks & rqRhineI) {
-		/* More recent docs say that this bit is reserved ... */
-		n = inb(pioaddr + ConfigA) | 0x20;
-		outb(n, pioaddr + ConfigA);
-	} else {
-		n = inb(pioaddr + ConfigD) | 0x80;
-		outb(n, pioaddr + ConfigD);
+
+	if (quirks & rqNeedEnMMIO) {
+		if (quirks & rqRhineI) {
+			/* More recent docs say that this bit is reserved */
+			n = inb(pioaddr + ConfigA) | 0x20;
+			outb(n, pioaddr + ConfigA);
+		} else {
+			n = inb(pioaddr + ConfigD) | 0x80;
+			outb(n, pioaddr + ConfigD);
+		}
 	}
 }
-#endif
+
+static inline int verify_mmio(struct device *hwdev,
+			      long pioaddr,
+			      void __iomem *ioaddr,
+			      u32 quirks)
+{
+	if (quirks & rqNeedEnMMIO) {
+		int i = 0;
+
+		/* Check that selected MMIO registers match the PIO ones */
+		while (mmio_verify_registers[i]) {
+			int reg = mmio_verify_registers[i++];
+			unsigned char a = inb(pioaddr+reg);
+			unsigned char b = readb(ioaddr+reg);
+
+			if (a != b) {
+				dev_err(hwdev,
+					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
+					reg, a, b);
+				return -EIO;
+			}
+		}
+	}
+	return 0;
+}
 
 /*
  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
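The hunk above turns the compile-time USE_MMIO switch into the rqNeedEnMMIO runtime quirk, so one binary can serve PIO-only platform parts and MMIO-capable PCI parts; verify_mmio() keeps the old sanity check but only runs when the quirk is set. A sketch of the resulting call-site pattern, assuming the helpers and quirk bit exactly as defined in this diff:

static int example_setup_io(struct device *hwdev, long pioaddr,
			    void __iomem *ioaddr, u32 quirks)
{
	/* Both helpers are no-ops unless rqNeedEnMMIO is set, so the
	 * platform (OF) probe path can call them unconditionally.
	 */
	enable_mmio(pioaddr, quirks);
	return verify_mmio(hwdev, pioaddr, ioaddr, quirks);
}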
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 	if (i > 512)
 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 
-#ifdef USE_MMIO
 	/*
 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
 	 * it is not known if that still works with the "win98-reboot" problem.
 	 */
 	enable_mmio(pioaddr, rp->quirks);
-#endif
 
 	/* Turn off EEPROM-controlled wake-up (magic packet) */
 	if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 static void rhine_poll(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-	const int irq = rp->pdev->irq;
+	const int irq = rp->irq;
 
 	disable_irq(irq);
 	rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
 	msleep(5);
 
 	/* Reload EEPROM controlled bytes cleared by soft reset */
-	rhine_reload_eeprom(pioaddr, dev);
+	if (dev_is_pci(dev->dev.parent))
+		rhine_reload_eeprom(pioaddr, dev);
 }
 
 static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
 #endif
 };
 
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+				 long pioaddr, void __iomem *ioaddr, int irq)
 {
 	struct net_device *dev;
 	struct rhine_private *rp;
-	int i, rc;
-	u32 quirks;
-	long pioaddr;
-	long memaddr;
-	void __iomem *ioaddr;
-	int io_size, phy_id;
+	int i, rc, phy_id;
 	const char *name;
-#ifdef USE_MMIO
-	int bar = 1;
-#else
-	int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
-	pr_info_once("%s\n", version);
-#endif
-
-	io_size = 256;
-	phy_id = 0;
-	quirks = 0;
-	name = "Rhine";
-	if (pdev->revision < VTunknown0) {
-		quirks = rqRhineI;
-		io_size = 128;
-	}
-	else if (pdev->revision >= VT6102) {
-		quirks = rqWOL | rqForceReset;
-		if (pdev->revision < VT6105) {
-			name = "Rhine II";
-			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
-		}
-		else {
-			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
-			if (pdev->revision >= VT6105_B0)
-				quirks |= rq6patterns;
-			if (pdev->revision < VT6105M)
-				name = "Rhine III";
-			else
-				name = "Rhine III (Management Adapter)";
-		}
-	}
-
-	rc = pci_enable_device(pdev);
-	if (rc)
-		goto err_out;
 
 	/* this should always be supported */
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
 	if (rc) {
-		dev_err(&pdev->dev,
-			"32-bit PCI DMA addresses not supported by the card!?\n");
-		goto err_out_pci_disable;
-	}
-
-	/* sanity check */
-	if ((pci_resource_len(pdev, 0) < io_size) ||
-	    (pci_resource_len(pdev, 1) < io_size)) {
-		rc = -EIO;
-		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-		goto err_out_pci_disable;
+		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+		goto err_out;
 	}
 
-	pioaddr = pci_resource_start(pdev, 0);
-	memaddr = pci_resource_start(pdev, 1);
-
-	pci_set_master(pdev);
-
 	dev = alloc_etherdev(sizeof(struct rhine_private));
 	if (!dev) {
 		rc = -ENOMEM;
-		goto err_out_pci_disable;
+		goto err_out;
 	}
-	SET_NETDEV_DEV(dev, &pdev->dev);
+	SET_NETDEV_DEV(dev, hwdev);
 
 	rp = netdev_priv(dev);
 	rp->dev = dev;
 	rp->quirks = quirks;
 	rp->pioaddr = pioaddr;
-	rp->pdev = pdev;
+	rp->base = ioaddr;
+	rp->irq = irq;
 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc)
-		goto err_out_free_netdev;
-
-	ioaddr = pci_iomap(pdev, bar, io_size);
-	if (!ioaddr) {
-		rc = -EIO;
-		dev_err(&pdev->dev,
-			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
-			pci_name(pdev), io_size, memaddr);
-		goto err_out_free_res;
-	}
-
-#ifdef USE_MMIO
-	enable_mmio(pioaddr, quirks);
-
-	/* Check that selected MMIO registers match the PIO ones */
-	i = 0;
-	while (mmio_verify_registers[i]) {
-		int reg = mmio_verify_registers[i++];
-		unsigned char a = inb(pioaddr+reg);
-		unsigned char b = readb(ioaddr+reg);
-		if (a != b) {
-			rc = -EIO;
-			dev_err(&pdev->dev,
-				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
-				reg, a, b);
-			goto err_out_unmap;
-		}
-	}
-#endif /* USE_MMIO */
-
-	rp->base = ioaddr;
+	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
 
 	u64_stats_init(&rp->tx_stats.syncp);
 	u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	if (pdev->revision >= VT6105M)
+	if (rp->quirks & rqMgmt)
 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 				 NETIF_F_HW_VLAN_CTAG_RX |
 				 NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
-		goto err_out_unmap;
+		goto err_out_free_netdev;
+
+	if (rp->quirks & rqRhineI)
+		name = "Rhine";
+	else if (rp->quirks & rqStatusWBRace)
+		name = "Rhine II";
+	else if (rp->quirks & rqMgmt)
+		name = "Rhine III (Management Adapter)";
+	else
+		name = "Rhine III";
 
 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-		    name,
-#ifdef USE_MMIO
-		    memaddr,
-#else
-		    (long)ioaddr,
-#endif
-		    dev->dev_addr, pdev->irq);
+		    name, (long)ioaddr, dev->dev_addr, rp->irq);
 
-	pci_set_drvdata(pdev, dev);
+	dev_set_drvdata(hwdev, dev);
 
 	{
 		u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	return 0;
 
+err_out_free_netdev:
+	free_netdev(dev);
+err_out:
+	return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct device *hwdev = &pdev->dev;
+	int rc;
+	long pioaddr, memaddr;
+	void __iomem *ioaddr;
+	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ *	 in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+	u32 quirks = rqNeedEnMMIO;
+#else
+	u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	pr_info_once("%s\n", version);
+#endif
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_out;
+
+	if (pdev->revision < VTunknown0) {
+		quirks |= rqRhineI;
+	} else if (pdev->revision >= VT6102) {
+		quirks |= rqWOL | rqForceReset;
+		if (pdev->revision < VT6105) {
+			quirks |= rqStatusWBRace;
+		} else {
+			quirks |= rqIntPHY;
+			if (pdev->revision >= VT6105_B0)
+				quirks |= rq6patterns;
+			if (pdev->revision >= VT6105M)
+				quirks |= rqMgmt;
+		}
+	}
+
+	/* sanity check */
+	if ((pci_resource_len(pdev, 0) < io_size) ||
+	    (pci_resource_len(pdev, 1) < io_size)) {
+		rc = -EIO;
+		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+		goto err_out_pci_disable;
+	}
+
+	pioaddr = pci_resource_start(pdev, 0);
+	memaddr = pci_resource_start(pdev, 1);
+
+	pci_set_master(pdev);
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_pci_disable;
+
+	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+	if (!ioaddr) {
+		rc = -EIO;
+		dev_err(hwdev,
+			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+			dev_name(hwdev), io_size, memaddr);
+		goto err_out_free_res;
+	}
+
+	enable_mmio(pioaddr, quirks);
+
+	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+	if (rc)
+		goto err_out_unmap;
+
+	rc = rhine_init_one_common(&pdev->dev, quirks,
+				   pioaddr, ioaddr, pdev->irq);
+	if (!rc)
+		return 0;
+
 err_out_unmap:
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 	pci_release_regions(pdev);
-err_out_free_netdev:
-	free_netdev(dev);
err_out_pci_disable:
 	pci_disable_device(pdev);
 err_out:
 	return rc;
 }
 
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const u32 *quirks;
+	int irq;
+	struct resource *res;
+	void __iomem *ioaddr;
+
+	match = of_match_device(rhine_of_tbl, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ioaddr))
+		return PTR_ERR(ioaddr);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!irq)
+		return -EINVAL;
+
+	quirks = match->data;
+	if (!quirks)
+		return -EINVAL;
+
+	return rhine_init_one_common(&pdev->dev, *quirks,
+				     (long)ioaddr, ioaddr, irq);
+}
+
 static int alloc_ring(struct net_device* dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 	void *ring;
 	dma_addr_t ring_dma;
 
-	ring = pci_alloc_consistent(rp->pdev,
+	ring = dma_alloc_coherent(hwdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
-				    &ring_dma);
+				    &ring_dma,
+				    GFP_ATOMIC);
 	if (!ring) {
 		netdev_err(dev, "Could not allocate DMA memory\n");
 		return -ENOMEM;
 	}
 	if (rp->quirks & rqRhineI) {
-		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
+		rp->tx_bufs = dma_alloc_coherent(hwdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
-						   &rp->tx_bufs_dma);
+						   &rp->tx_bufs_dma,
+						   GFP_ATOMIC);
 		if (rp->tx_bufs == NULL) {
-			pci_free_consistent(rp->pdev,
+			dma_free_coherent(hwdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
 			return -ENOMEM;
 		}
 	}
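pci_alloc_consistent(pdev, size, dma) is just a PCI-flavoured wrapper around dma_alloc_coherent(&pdev->dev, size, dma, GFP_ATOMIC); calling the generic API directly, keyed off dev->dev.parent, is what lets alloc_ring() serve both bus types. The core of the conversion as a sketch (example_desc stands in for the driver's descriptor structs):

#include <linux/dma-mapping.h>

struct example_desc { __le32 status, len, addr, next; };

static void *example_alloc_ring(struct device *hwdev, int entries,
				dma_addr_t *ring_dma)
{
	/* hwdev is dev->dev.parent: the PCI or platform device that
	 * actually owns the DMA operations for this NIC.
	 */
	return dma_alloc_coherent(hwdev, entries * sizeof(struct example_desc),
				  ring_dma, GFP_KERNEL);
}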
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
1128static void free_ring(struct net_device* dev) 1195static void free_ring(struct net_device* dev)
1129{ 1196{
1130 struct rhine_private *rp = netdev_priv(dev); 1197 struct rhine_private *rp = netdev_priv(dev);
1198 struct device *hwdev = dev->dev.parent;
1131 1199
1132 pci_free_consistent(rp->pdev, 1200 dma_free_coherent(hwdev,
1133 RX_RING_SIZE * sizeof(struct rx_desc) + 1201 RX_RING_SIZE * sizeof(struct rx_desc) +
1134 TX_RING_SIZE * sizeof(struct tx_desc), 1202 TX_RING_SIZE * sizeof(struct tx_desc),
1135 rp->rx_ring, rp->rx_ring_dma); 1203 rp->rx_ring, rp->rx_ring_dma);
1136 rp->tx_ring = NULL; 1204 rp->tx_ring = NULL;
1137 1205
1138 if (rp->tx_bufs) 1206 if (rp->tx_bufs)
1139 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE, 1207 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1140 rp->tx_bufs, rp->tx_bufs_dma); 1208 rp->tx_bufs, rp->tx_bufs_dma);
1141 1209
1142 rp->tx_bufs = NULL; 1210 rp->tx_bufs = NULL;
1143 1211
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
1146static void alloc_rbufs(struct net_device *dev) 1214static void alloc_rbufs(struct net_device *dev)
1147{ 1215{
1148 struct rhine_private *rp = netdev_priv(dev); 1216 struct rhine_private *rp = netdev_priv(dev);
1217 struct device *hwdev = dev->dev.parent;
1149 dma_addr_t next; 1218 dma_addr_t next;
1150 int i; 1219 int i;
1151 1220
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
1174 break; 1243 break;
1175 1244
1176 rp->rx_skbuff_dma[i] = 1245 rp->rx_skbuff_dma[i] =
1177 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, 1246 dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
1178 PCI_DMA_FROMDEVICE); 1247 DMA_FROM_DEVICE);
1179 if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) { 1248 if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
1180 rp->rx_skbuff_dma[i] = 0; 1249 rp->rx_skbuff_dma[i] = 0;
1181 dev_kfree_skb(skb); 1250 dev_kfree_skb(skb);
1182 break; 1251 break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
1190static void free_rbufs(struct net_device* dev) 1259static void free_rbufs(struct net_device* dev)
1191{ 1260{
1192 struct rhine_private *rp = netdev_priv(dev); 1261 struct rhine_private *rp = netdev_priv(dev);
1262 struct device *hwdev = dev->dev.parent;
1193 int i; 1263 int i;
1194 1264
1195 /* Free all the skbuffs in the Rx queue. */ 1265 /* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
1197 rp->rx_ring[i].rx_status = 0; 1267 rp->rx_ring[i].rx_status = 0;
1198 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ 1268 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1199 if (rp->rx_skbuff[i]) { 1269 if (rp->rx_skbuff[i]) {
1200 pci_unmap_single(rp->pdev, 1270 dma_unmap_single(hwdev,
1201 rp->rx_skbuff_dma[i], 1271 rp->rx_skbuff_dma[i],
1202 rp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1272 rp->rx_buf_sz, DMA_FROM_DEVICE);
1203 dev_kfree_skb(rp->rx_skbuff[i]); 1273 dev_kfree_skb(rp->rx_skbuff[i]);
1204 } 1274 }
1205 rp->rx_skbuff[i] = NULL; 1275 rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
1230static void free_tbufs(struct net_device* dev) 1300static void free_tbufs(struct net_device* dev)
1231{ 1301{
1232 struct rhine_private *rp = netdev_priv(dev); 1302 struct rhine_private *rp = netdev_priv(dev);
1303 struct device *hwdev = dev->dev.parent;
1233 int i; 1304 int i;
1234 1305
1235 for (i = 0; i < TX_RING_SIZE; i++) { 1306 for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
1238 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ 1309 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1239 if (rp->tx_skbuff[i]) { 1310 if (rp->tx_skbuff[i]) {
1240 if (rp->tx_skbuff_dma[i]) { 1311 if (rp->tx_skbuff_dma[i]) {
1241 pci_unmap_single(rp->pdev, 1312 dma_unmap_single(hwdev,
1242 rp->tx_skbuff_dma[i], 1313 rp->tx_skbuff_dma[i],
1243 rp->tx_skbuff[i]->len, 1314 rp->tx_skbuff[i]->len,
1244 PCI_DMA_TODEVICE); 1315 DMA_TO_DEVICE);
1245 } 1316 }
1246 dev_kfree_skb(rp->tx_skbuff[i]); 1317 dev_kfree_skb(rp->tx_skbuff[i]);
1247 } 1318 }
@@ -1278,8 +1349,9 @@ static void rhine_set_carrier(struct mii_if_info *mii)
1278 /* autoneg is off: Link is always assumed to be up */ 1349 /* autoneg is off: Link is always assumed to be up */
1279 if (!netif_carrier_ok(dev)) 1350 if (!netif_carrier_ok(dev))
1280 netif_carrier_on(dev); 1351 netif_carrier_on(dev);
1281 } else /* Let MMI library update carrier status */ 1352 }
1282 rhine_check_media(dev, 0); 1353
1354 rhine_check_media(dev, 0);
1283 1355
1284 netif_info(rp, link, dev, "force_media %d, carrier %d\n", 1356 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1285 mii->force_media, netif_carrier_ok(dev)); 1357 mii->force_media, netif_carrier_ok(dev));
@@ -1469,7 +1541,7 @@ static void init_registers(struct net_device *dev)
1469 1541
1470 rhine_set_rx_mode(dev); 1542 rhine_set_rx_mode(dev);
1471 1543
1472 if (rp->pdev->revision >= VT6105M) 1544 if (rp->quirks & rqMgmt)
1473 rhine_init_cam_filter(dev); 1545 rhine_init_cam_filter(dev);
1474 1546
1475 napi_enable(&rp->napi); 1547 napi_enable(&rp->napi);
@@ -1581,16 +1653,15 @@ static int rhine_open(struct net_device *dev)
1581 void __iomem *ioaddr = rp->base; 1653 void __iomem *ioaddr = rp->base;
1582 int rc; 1654 int rc;
1583 1655
1584 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name, 1656 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1585 dev);
1586 if (rc) 1657 if (rc)
1587 return rc; 1658 return rc;
1588 1659
1589 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq); 1660 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1590 1661
1591 rc = alloc_ring(dev); 1662 rc = alloc_ring(dev);
1592 if (rc) { 1663 if (rc) {
1593 free_irq(rp->pdev->irq, dev); 1664 free_irq(rp->irq, dev);
1594 return rc; 1665 return rc;
1595 } 1666 }
1596 alloc_rbufs(dev); 1667 alloc_rbufs(dev);
@@ -1659,6 +1730,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1659 struct net_device *dev) 1730 struct net_device *dev)
1660{ 1731{
1661 struct rhine_private *rp = netdev_priv(dev); 1732 struct rhine_private *rp = netdev_priv(dev);
1733 struct device *hwdev = dev->dev.parent;
1662 void __iomem *ioaddr = rp->base; 1734 void __iomem *ioaddr = rp->base;
1663 unsigned entry; 1735 unsigned entry;
1664 1736
@@ -1695,9 +1767,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1695 rp->tx_bufs)); 1767 rp->tx_bufs));
1696 } else { 1768 } else {
1697 rp->tx_skbuff_dma[entry] = 1769 rp->tx_skbuff_dma[entry] =
1698 pci_map_single(rp->pdev, skb->data, skb->len, 1770 dma_map_single(hwdev, skb->data, skb->len,
1699 PCI_DMA_TODEVICE); 1771 DMA_TO_DEVICE);
1700 if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { 1772 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1701 dev_kfree_skb_any(skb); 1773 dev_kfree_skb_any(skb);
1702 rp->tx_skbuff_dma[entry] = 0; 1774 rp->tx_skbuff_dma[entry] = 0;
1703 dev->stats.tx_dropped++; 1775 dev->stats.tx_dropped++;
@@ -1788,6 +1860,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1788static void rhine_tx(struct net_device *dev) 1860static void rhine_tx(struct net_device *dev)
1789{ 1861{
1790 struct rhine_private *rp = netdev_priv(dev); 1862 struct rhine_private *rp = netdev_priv(dev);
1863 struct device *hwdev = dev->dev.parent;
1791 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; 1864 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1792 1865
1793 /* find and cleanup dirty tx descriptors */ 1866 /* find and cleanup dirty tx descriptors */
@@ -1831,10 +1904,10 @@ static void rhine_tx(struct net_device *dev)
1831 } 1904 }
1832 /* Free the original skb. */ 1905 /* Free the original skb. */
1833 if (rp->tx_skbuff_dma[entry]) { 1906 if (rp->tx_skbuff_dma[entry]) {
1834 pci_unmap_single(rp->pdev, 1907 dma_unmap_single(hwdev,
1835 rp->tx_skbuff_dma[entry], 1908 rp->tx_skbuff_dma[entry],
1836 rp->tx_skbuff[entry]->len, 1909 rp->tx_skbuff[entry]->len,
1837 PCI_DMA_TODEVICE); 1910 DMA_TO_DEVICE);
1838 } 1911 }
1839 dev_consume_skb_any(rp->tx_skbuff[entry]); 1912 dev_consume_skb_any(rp->tx_skbuff[entry]);
1840 rp->tx_skbuff[entry] = NULL; 1913 rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1936,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1863static int rhine_rx(struct net_device *dev, int limit) 1936static int rhine_rx(struct net_device *dev, int limit)
1864{ 1937{
1865 struct rhine_private *rp = netdev_priv(dev); 1938 struct rhine_private *rp = netdev_priv(dev);
1939 struct device *hwdev = dev->dev.parent;
1866 int count; 1940 int count;
1867 int entry = rp->cur_rx % RX_RING_SIZE; 1941 int entry = rp->cur_rx % RX_RING_SIZE;
1868 1942
@@ -1924,19 +1998,19 @@ static int rhine_rx(struct net_device *dev, int limit)
 			if (pkt_len < rx_copybreak)
 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 			if (skb) {
-				pci_dma_sync_single_for_cpu(rp->pdev,
-							    rp->rx_skbuff_dma[entry],
-							    rp->rx_buf_sz,
-							    PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(hwdev,
+							rp->rx_skbuff_dma[entry],
+							rp->rx_buf_sz,
+							DMA_FROM_DEVICE);
 
 				skb_copy_to_linear_data(skb,
 							rp->rx_skbuff[entry]->data,
 							pkt_len);
 				skb_put(skb, pkt_len);
-				pci_dma_sync_single_for_device(rp->pdev,
-							       rp->rx_skbuff_dma[entry],
-							       rp->rx_buf_sz,
-							       PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(hwdev,
+							   rp->rx_skbuff_dma[entry],
+							   rp->rx_buf_sz,
+							   DMA_FROM_DEVICE);
 			} else {
 				skb = rp->rx_skbuff[entry];
 				if (skb == NULL) {
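
[annotation] The copybreak path above keeps the receive buffer mapped and copies small packets out of it, so ownership must be handed to the CPU and back around the copy. Condensed sketch of the discipline, with dma_addr, buf_sz and rx_buf standing in for the driver's ring fields:

	/* Copy small packets out of the still-mapped DMA buffer so the
	 * original buffer can be handed straight back to the hardware. */
	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
	if (skb) {
		dma_sync_single_for_cpu(hwdev, dma_addr, buf_sz, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, rx_buf->data, pkt_len);	/* CPU copy */
		skb_put(skb, pkt_len);
		dma_sync_single_for_device(hwdev, dma_addr, buf_sz, DMA_FROM_DEVICE);
	}
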
@@ -1945,10 +2019,10 @@ static int rhine_rx(struct net_device *dev, int limit)
 				}
 				rp->rx_skbuff[entry] = NULL;
 				skb_put(skb, pkt_len);
-				pci_unmap_single(rp->pdev,
+				dma_unmap_single(hwdev,
 						 rp->rx_skbuff_dma[entry],
 						 rp->rx_buf_sz,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			}
 
 			if (unlikely(desc_length & DescTag))
@@ -1979,10 +2053,11 @@ static int rhine_rx(struct net_device *dev, int limit)
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			rp->rx_skbuff_dma[entry] =
-				pci_map_single(rp->pdev, skb->data,
-					       rp->rx_buf_sz,
-					       PCI_DMA_FROMDEVICE);
-			if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+				dma_map_single(hwdev, skb->data,
+					       rp->rx_buf_sz,
+					       DMA_FROM_DEVICE);
+			if (dma_mapping_error(hwdev,
+					      rp->rx_skbuff_dma[entry])) {
 				dev_kfree_skb(skb);
 				rp->rx_skbuff_dma[entry] = 0;
 				break;
@@ -2103,7 +2178,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-	} else if (rp->pdev->revision >= VT6105M) {
+	} else if (rp->quirks & rqMgmt) {
 		int i = 0;
 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
 		netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2200,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
 	}
 	/* enable/disable VLAN receive filtering */
-	if (rp->pdev->revision >= VT6105M) {
+	if (rp->quirks & rqMgmt) {
 		if (dev->flags & IFF_PROMISC)
 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
 		else
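
[annotation] rp->pdev->revision exists only when the device sits on PCI, so the two tests above move the capability into the driver's quirks bitmask. rqMgmt comes from the patch itself; populating it from the PCI revision at probe time is an illustrative reconstruction, not quoted code:

	/* PCI probe path: record the capability once (illustrative). */
	if (pdev->revision >= VT6105M)
		rp->quirks |= rqMgmt;

	/* Shared runtime paths: no PCI-specific fields needed. */
	if (rp->quirks & rqMgmt) {
		/* program the per-address CAM filters (6105M and better) */
	}
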
@@ -2136,11 +2211,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-	struct rhine_private *rp = netdev_priv(dev);
+	struct device *hwdev = dev->dev.parent;
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2352,7 @@ static int rhine_close(struct net_device *dev)
 	/* Stop the chip's Tx and Rx processes. */
 	iowrite16(CmdStop, ioaddr + ChipCmd);
 
-	free_irq(rp->pdev->irq, dev);
+	free_irq(rp->irq, dev);
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);
@@ -2286,7 +2361,7 @@ static int rhine_close(struct net_device *dev)
 }
 
 
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2375,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct rhine_private *rp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	iounmap(rp->base);
+
+	free_netdev(dev);
+
+	return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
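
[annotation] The platform teardown added above is deliberately lighter than its PCI counterpart: there is no PCI core state to release, so it only unwinds what the platform probe set up — the netdev registration, the MMIO mapping, and the net_device itself.
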
@@ -2354,8 +2443,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int rhine_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rhine_private *rp = netdev_priv(dev);
 
 	if (!netif_running(dev))
@@ -2367,23 +2455,21 @@ static int rhine_suspend(struct device *device)
 
 	netif_device_detach(dev);
 
-	rhine_shutdown(pdev);
+	if (dev_is_pci(device))
+		rhine_shutdown_pci(to_pci_dev(device));
 
 	return 0;
 }
 
 static int rhine_resume(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rhine_private *rp = netdev_priv(dev);
 
 	if (!netif_running(dev))
 		return 0;
 
-#ifdef USE_MMIO
 	enable_mmio(rp->pioaddr, rp->quirks);
-#endif
 	rhine_power_init(dev);
 	free_tbufs(dev);
 	free_rbufs(dev);
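
[annotation] The reworked PM callbacks are bus-agnostic because pci_set_drvdata() and platform_set_drvdata() both store into the same struct device slot that dev_get_drvdata() reads, while dev_is_pci() gates the one step that is genuinely PCI-specific. Condensed:

	pci_set_drvdata(pdev, dev);		/* PCI probe: dev is the net_device */
	platform_set_drvdata(pdev, dev);	/* OF probe: same underlying slot */

	/* Shared PM callback -- either probe path lands here: */
	struct net_device *dev = dev_get_drvdata(device);
	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));	/* PCI-only quiesce */
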
@@ -2408,15 +2494,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
 
 #endif /* !CONFIG_PM_SLEEP */
 
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
 	.name		= DRV_NAME,
 	.id_table	= rhine_pci_tbl,
-	.probe		= rhine_init_one,
-	.remove		= rhine_remove_one,
-	.shutdown	= rhine_shutdown,
+	.probe		= rhine_init_one_pci,
+	.remove		= rhine_remove_one_pci,
+	.shutdown	= rhine_shutdown_pci,
 	.driver.pm	= RHINE_PM_OPS,
 };
 
+static struct platform_driver rhine_driver_platform = {
+	.probe		= rhine_init_one_platform,
+	.remove		= rhine_remove_one_platform,
+	.driver = {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table	= rhine_of_tbl,
+		.pm		= RHINE_PM_OPS,
+	}
+};
+
 static struct dmi_system_id rhine_dmi_table[] __initdata = {
 	{
 		.ident = "EPIA-M",
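
[annotation] rhine_of_tbl is defined earlier in the patch, outside this excerpt. For orientation, an OF match table has the following shape; the compatible string below is illustrative rather than quoted from the patch:

	static const struct of_device_id rhine_of_tbl[] = {
		{ .compatible = "via,vt8500-rhine" },	/* illustrative string */
		{ }	/* sentinel entry */
	};
	MODULE_DEVICE_TABLE(of, rhine_of_tbl);
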
@@ -2437,6 +2534,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
 
 static int __init rhine_init(void)
 {
+	int ret_pci, ret_platform;
+
 /* when a module, this is printed whether or not devices are found in probe */
 #ifdef MODULE
 	pr_info("%s\n", version);
@@ -2449,13 +2548,19 @@ static int __init rhine_init(void)
 	else if (avoid_D3)
 		pr_info("avoid_D3 set\n");
 
-	return pci_register_driver(&rhine_driver);
+	ret_pci = pci_register_driver(&rhine_driver_pci);
+	ret_platform = platform_driver_register(&rhine_driver_platform);
+	if ((ret_pci < 0) && (ret_platform < 0))
+		return ret_pci;
+
+	return 0;
 }
 
 
 static void __exit rhine_cleanup(void)
 {
-	pci_unregister_driver(&rhine_driver);
+	platform_driver_unregister(&rhine_driver_platform);
+	pci_unregister_driver(&rhine_driver_pci);
 }
 
 
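
[annotation] Note the registration semantics above: module init now succeeds if either bus driver registers, and fails (propagating the PCI error code) only when both registrations fail; cleanup unregisters the two drivers in reverse order.
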
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index fa193c4688da..4ef818a7a6c6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
 	long end = jiffies + 2;
 
 	while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
-		if (end - jiffies <= 0) {
+		if (time_before_eq(end, jiffies)) {
 			WARN_ON(1);
 			return -ETIMEDOUT;
 		}
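
[annotation] This one-line fix, repeated in the two Xilinx MDIO helpers below, is worth spelling out. Because end is a long and jiffies an unsigned long, "end - jiffies" is evaluated in unsigned arithmetic, so "<= 0" is true only at the single tick where end == jiffies; miss that tick between polls and the loop never times out. time_before_eq() from <linux/jiffies.h> compares the signed difference instead, which also survives jiffies wraparound. Stripped of its typecheck() guards, it reduces to:

	#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)
	#define time_before_eq(a, b)	time_after_eq(b, a)

	/* Illustrative polling loop using the safe helper; hw_ready() and
	 * the 20 ms budget are hypothetical. */
	unsigned long timeout = jiffies + msecs_to_jiffies(20);

	while (!hw_ready()) {
		if (time_before_eq(timeout, jiffies))
			return -ETIMEDOUT;
		cpu_relax();
	}
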
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 64b4639f43b6..d4abf478e2bb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
 	long end = jiffies + 2;
 	while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
 		 XAE_MDIO_MCR_READY_MASK)) {
-		if (end - jiffies <= 0) {
+		if (time_before_eq(end, jiffies)) {
 			WARN_ON(1);
 			return -ETIMEDOUT;
 		}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0d87c67a5ff7..8c4aed3053eb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
 	 */
 	while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
 	       XEL_MDIOCTRL_MDIOSTS_MASK) {
-		if (end - jiffies <= 0) {
+		if (time_before_eq(end, jiffies)) {
 			WARN_ON(1);
 			return -ETIMEDOUT;
 		}