author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 17:27:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 17:27:40 -0400
commit		f9da455b93f6ba076935b4ef4589f61e529ae046 (patch)
tree		3c4e69ce1ba1d6bf65915b97a76ca2172105b278 /drivers/net
parent		0e04c641b199435f3779454055f6a7de258ecdfc (diff)
parent		e5eca6d41f53db48edd8cf88a3f59d2c30227f8e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov.

 2) Multiqueue support in xen-netback and xen-netfront, from Andrew J
    Benniston.

 3) Allow tweaking of aggregation settings in cdc_ncm driver, from
    Bjørn Mork.

 4) BPF now has a "random" opcode, from Chema Gonzalez.

 5) Add more BPF documentation and improve test framework, from Daniel
    Borkmann.

 6) Support TCP fastopen over ipv6, from Daniel Lee.

 7) Add software TSO helper functions and use them to support software
    TSO in mvneta and mv643xx_eth drivers.  From Ezequiel Garcia.

 8) Support software TSO in fec driver too, from Nimrod Andy.

 9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli.

10) Handle broadcasts more gracefully over macvlan when there are large
    numbers of interfaces configured, from Herbert Xu.

11) Allow more control over fwmark used for non-socket based responses,
    from Lorenzo Colitti.

12) Do TCP congestion window limiting based upon measurements, from
    Neal Cardwell.

13) Support busy polling in SCTP, from Neal Horman.

14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru.

15) Bridge promisc mode handling improvements from Vlad Yasevich.

16) Don't use inetpeer entries to implement ID generation any more, it
    performs poorly, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
  rtnetlink: fix userspace API breakage for iproute2 < v3.9.0
  tcp: fixing TLP's FIN recovery
  net: fec: Add software TSO support
  net: fec: Add Scatter/gather support
  net: fec: Increase buffer descriptor entry number
  net: fec: Factorize feature setting
  net: fec: Enable IP header hardware checksum
  net: fec: Factorize the .xmit transmit function
  bridge: fix compile error when compiling without IPv6 support
  bridge: fix smatch warning / potential null pointer dereference
  via-rhine: fix full-duplex with autoneg disable
  bnx2x: Enlarge the dorq threshold for VFs
  bnx2x: Check for UNDI in uncommon branch
  bnx2x: Fix 1G-baseT link
  bnx2x: Fix link for KR with swapped polarity lane
  sctp: Fix sk_ack_backlog wrap-around problem
  net/core: Add VF link state control policy
  net/fsl: xgmac_mdio is dependent on OF_MDIO
  net/fsl: Make xgmac_mdio read error message useful
  net_sched: drr: warn when qdisc is not work conserving
  ...
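A note on item 6 above: this cycle extends client-side TCP Fast Open to ipv6 sockets. Below is a minimal userspace sketch of the TFO client pattern, assuming the standard MSG_FASTOPEN sendto() interface that already existed for ipv4; the address and payload are placeholders, not taken from this merge.

    /* Hypothetical TFO-over-IPv6 client sketch: send data in the SYN via
     * sendto(MSG_FASTOPEN) instead of connect()+send(). */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <unistd.h>

    #ifndef MSG_FASTOPEN
    #define MSG_FASTOPEN 0x20000000	/* value from linux/socket.h */
    #endif

    int main(void)
    {
    	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
    				   .sin6_port = htons(80) };
    	const char req[] = "GET / HTTP/1.0\r\n\r\n";
    	int fd = socket(AF_INET6, SOCK_STREAM, 0);

    	inet_pton(AF_INET6, "2001:db8::1", &sa.sin6_addr); /* placeholder */
    	/* Data rides in the SYN when a TFO cookie is cached; otherwise
    	 * the kernel falls back to a regular three-way handshake. */
    	if (sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
    		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
    		perror("sendto");
    	close(fd);
    	return 0;
    }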
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 62
-rw-r--r--  drivers/net/bonding/bond_alb.c | 153
-rw-r--r--  drivers/net/bonding/bond_alb.h | 1
-rw-r--r--  drivers/net/bonding/bond_debugfs.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 218
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 8
-rw-r--r--  drivers/net/bonding/bond_options.c | 66
-rw-r--r--  drivers/net/bonding/bond_options.h | 2
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 16
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 567
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r--  drivers/net/bonding/bonding.h | 143
-rw-r--r--  drivers/net/can/Kconfig | 30
-rw-r--r--  drivers/net/can/Makefile | 4
-rw-r--r--  drivers/net/can/c_can/c_can.c | 15
-rw-r--r--  drivers/net/can/c_can/c_can.h | 8
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 78
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 84
-rw-r--r--  drivers/net/can/mscan/Kconfig | 2
-rw-r--r--  drivers/net/can/rcar_can.c | 876
-rw-r--r--  drivers/net/can/softing/softing_main.c | 20
-rw-r--r--  drivers/net/can/spi/Kconfig | 10
-rw-r--r--  drivers/net/can/spi/Makefile | 8
-rw-r--r--  drivers/net/can/spi/mcp251x.c (renamed from drivers/net/can/mcp251x.c) | 95
-rw-r--r--  drivers/net/can/usb/Kconfig | 12
-rw-r--r--  drivers/net/can/usb/Makefile | 1
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 971
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 53
-rw-r--r--  drivers/net/can/xilinx_can.c | 1208
-rw-r--r--  drivers/net/dsa/mv88e6123_61_65.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6131.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 12
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 4
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/amd/Makefile | 1
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c | 3
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/hplance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 6
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/Makefile | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1007
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 375
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 556
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2182
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1351
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 510
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 512
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 433
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 676
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 49
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 1654
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 678
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 36
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 49
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 29
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 104
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 263
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 32
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.c | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 67
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 323
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.h | 9
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 4
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 66
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 6
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 20
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 15
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 610
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 85
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 194
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 581
-rw-r--r--  drivers/net/ethernet/ethoc.c | 6
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 13
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 661
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 22
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 4
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 27
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 1066
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r--  drivers/net/ethernet/icplus/ipg.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 41
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 62
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 88
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 60
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 137
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 141
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 311
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1124
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 37
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 131
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 53
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 209
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 61
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 164
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 13
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 12
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_register.h | 17
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_status.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 84
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 23
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 67
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 9
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 423
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 185
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 62
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 121
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 72
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 100
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 96
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 134
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 47
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 56
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 47
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 47
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h | 47
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 48
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 49
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 66
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 48
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 58
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 49
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 151
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 47
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 203
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 60
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 89
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 85
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 80
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 24
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 96
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 356
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 68
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 164
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 66
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 62
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 376
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 324
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 147
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 278
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/profile.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/reset.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 124
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 4
-rw-r--r--  drivers/net/ethernet/micrel/ks8695net.c | 6
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 87
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 5
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 2
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 15
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 22
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 16
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 8
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/packetengines/hamachi.c | 6
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 36
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 44
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 42
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 92
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 69
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 192
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 171
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 68
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 47
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 50
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 7
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 22
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 17
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 14
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 62
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 114
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 11
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 39
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 50
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 13
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2
-rw-r--r--  drivers/net/ethernet/via/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 511
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 163
-rw-r--r--  drivers/net/hyperv/netvsc.c | 529
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 129
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 193
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 133
-rw-r--r--  drivers/net/ieee802154/fakelb.c | 6
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 33
-rw-r--r--  drivers/net/irda/Kconfig | 3
-rw-r--r--  drivers/net/irda/via-ircc.c | 7
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 33
-rw-r--r--  drivers/net/macvlan.c | 262
-rw-r--r--  drivers/net/ntb_netdev.c | 3
-rw-r--r--  drivers/net/phy/Kconfig | 6
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c | 1357
-rw-r--r--  drivers/net/phy/at803x.c | 39
-rw-r--r--  drivers/net/phy/fixed.c | 81
-rw-r--r--  drivers/net/phy/mdio_bus.c | 73
-rw-r--r--  drivers/net/phy/micrel.c | 106
-rw-r--r--  drivers/net/phy/phy_device.c | 50
-rw-r--r--  drivers/net/phy/realtek.c | 88
-rw-r--r--  drivers/net/phy/smsc.c | 3
-rw-r--r--  drivers/net/phy/vitesse.c | 3
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 4
-rw-r--r--  drivers/net/ppp/pptp.c | 2
-rw-r--r--  drivers/net/rionet.c | 2
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/team/team_mode_loadbalance.c | 12
-rw-r--r--  drivers/net/tun.c | 54
-rw-r--r--  drivers/net/usb/catc.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 129
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 740
-rw-r--r--  drivers/net/usb/hso.c | 2
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 13
-rw-r--r--  drivers/net/usb/ipheth.c | 2
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/usb/pegasus.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 7
-rw-r--r--  drivers/net/usb/r8152.c | 6
-rw-r--r--  drivers/net/usb/rtl8150.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 9
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 14
-rw-r--r--  drivers/net/vxlan.c | 187
-rw-r--r--  drivers/net/wan/farsync.c | 31
-rw-r--r--  drivers/net/wan/sdla.c | 4
-rw-r--r--  drivers/net/wimax/i2400m/control.c | 2
-rw-r--r--  drivers/net/wimax/i2400m/driver.c | 7
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 180
-rw-r--r--  drivers/net/wireless/at76c50x-usb.h | 26
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 383
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 17
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 366
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 26
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 109
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 42
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 37
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 587
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 990
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 336
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 183
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 90
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 104
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/Kconfig | 30
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath6kl/core.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath6kl/debug.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/debug.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/hif.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath6kl/hif.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc_mbox.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc_pipe.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath6kl/main.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath6kl/sdio.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath6kl/target.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/txrx.c | 31
-rw-r--r--  drivers/net/wireless/ath/ath6kl/usb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9340_initvals.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar953x_initvals.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-debug.c | 253
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-debug.h | 72
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 214
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 44
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 19
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 555
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 40
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 163
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 4
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 8
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.c | 45
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 5
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 43
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 14
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/rx_reorder.c | 26
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 28
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 11
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 36
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.h | 50
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 42
-rw-r--r--  drivers/net/wireless/b43/b43.h | 4
-rw-r--r--  drivers/net/wireless/b43/bus.h | 10
-rw-r--r--  drivers/net/wireless/b43/main.c | 498
-rw-r--r--  drivers/net/wireless/b43/phy_common.c | 96
-rw-r--r--  drivers/net/wireless/b43/phy_common.h | 8
-rw-r--r--  drivers/net/wireless/b43/phy_g.c | 6
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 321
-rw-r--r--  drivers/net/wireless/b43/phy_n.h | 1
-rw-r--r--  drivers/net/wireless/b43/radio_2056.c | 1336
-rw-r--r--  drivers/net/wireless/b43/tables_nphy.c | 150
-rw-r--r--  drivers/net/wireless/b43/tables_nphy.h | 3
-rw-r--r--  drivers/net/wireless/b43/wa.c | 2
-rw-r--r--  drivers/net/wireless/b43/xmit.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/Makefile | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c | 18
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 39
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 283
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 332
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/firmware.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/nvram.h) | 24
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h | 24
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 80
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/nvram.c | 94
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 270
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 213
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 13
-rw-r--r--  drivers/net/wireless/brcm80211/brcmutil/d11.c | 93
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcmu_d11.h | 14
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcmu_wifi.h | 1
-rw-r--r--  drivers/net/wireless/cw1200/sta.c | 3
-rw-r--r--  drivers/net/wireless/cw1200/sta.h | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/3945.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 3
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/Makefile | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/calib.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/debugfs.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/devices.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/led.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/lib.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 39
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/power.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rx.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rxon.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/scan.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/sta.c | 29
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tt.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-hw.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h | 41
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h (renamed from drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h) | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 35
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-modparams.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 73
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-op-mode.h | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-phy-db.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 60
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/Makefile | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 114
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 129
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | 61
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h | 38
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h | 46
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 54
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 31
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 119
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 204
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 60
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/nvm.c | 102
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 97
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/power.c | 410
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/quota.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 491
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 45
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sf.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 189
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 71
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tt.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 87
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 86
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 137
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 197
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 7
-rw-r--r--  drivers/net/wireless/libertas/defs.h | 3
-rw-r--r--  drivers/net/wireless/libertas/rx.c | 8
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 5
-rw-r--r--  drivers/net/wireless/mwifiex/11ac.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/11n.c | 45
-rw-r--r--  drivers/net/wireless/mwifiex/11n.h | 3
-rw-r--r--  drivers/net/wireless/mwifiex/11n_aggr.c | 28
-rw-r--r--  drivers/net/wireless/mwifiex/README | 7
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 19
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/debugfs.c | 25
-rw-r--r--  drivers/net/wireless/mwifiex/decl.h | 8
-rw-r--r--  drivers/net/wireless/mwifiex/fw.h | 25
-rw-r--r--  drivers/net/wireless/mwifiex/ioctl.h | 2
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 26
-rw-r--r--  drivers/net/wireless/mwifiex/pcie.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 66
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 15
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.h | 18
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmd.c | 7
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmdresp.c | 18
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 44
-rw-r--r--  drivers/net/wireless/mwifiex/sta_rx.c | 16
-rw-r--r--  drivers/net/wireless/mwifiex/sta_tx.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/tdls.c | 97
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 8
-rw-r--r--  drivers/net/wireless/mwifiex/usb.c | 55
-rw-r--r--  drivers/net/wireless/mwifiex/util.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 22
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.h | 5
-rw-r--r--  drivers/net/wireless/orinoco/hw.c | 4
-rw-r--r--  drivers/net/wireless/orinoco/hw.h | 4
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_usb.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/wext.c | 4
-rw-r--r--  drivers/net/wireless/p54/main.c | 3
-rw-r--r--  drivers/net/wireless/ray_cs.c | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 4
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 1
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 4
-rw-r--r--  drivers/net/wireless/rsi/rsi_common.h | 2
-rw-r--r--  drivers/net/wireless/rsi/rsi_mgmt.h | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 15
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 10
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 10
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/Makefile | 4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/dev.c | 27
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/dev.c | 11
-rw-r--r--  drivers/net/wireless/rtl818x/rtl818x.h | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/hw.c | 20
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/hw.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/sw.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 21
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/hw.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/hw.c | 20
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/hw.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/sw.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/hw.c | 21
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/hw.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/sw.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/hw.c | 20
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/hw.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/sw.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/trx.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 6
-rw-r--r--  drivers/net/wireless/ti/wl1251/acx.c | 1
-rw-r--r--  drivers/net/wireless/ti/wl1251/cmd.c | 1
-rw-r--r--  drivers/net/wireless/ti/wl1251/event.c | 5
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 68
-rw-r--r--  drivers/net/wireless/ti/wl1251/spi.c | 44
-rw-r--r--  drivers/net/wireless/ti/wlcore/debugfs.h | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 5
-rw-r--r--  drivers/net/wireless/ti/wlcore/sdio.c | 28
-rw-r--r--  drivers/net/wireless/ti/wlcore/spi.c | 69
-rw-r--r--  drivers/net/wireless/ti/wlcore/wlcore_i.h | 4
-rw-r--r--  drivers/net/xen-netback/common.h | 107
-rw-r--r--  drivers/net/xen-netback/interface.c | 523
-rw-r--r--  drivers/net/xen-netback/netback.c | 754
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 182
-rw-r--r--  drivers/net/xen-netfront.c | 1123

692 files changed, 37654 insertions(+), 13603 deletions(-)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b667a51ed215..0dfeaf5da3f2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -157,7 +157,7 @@ static inline struct aggregator *__get_first_agg(struct port *port)
 
 	rcu_read_lock();
 	first_slave = bond_first_slave_rcu(bond);
-	agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+	agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL;
 	rcu_read_unlock();
 
 	return agg;
@@ -192,7 +192,7 @@ static inline void __enable_port(struct port *port)
 {
 	struct slave *slave = port->slave;
 
-	if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
+	if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave))
 		bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
 }
 
@@ -241,7 +241,7 @@ static inline int __check_agg_selection_timer(struct port *port)
  */
 static inline void __get_state_machine_lock(struct port *port)
 {
-	spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+	spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -250,7 +250,7 @@ static inline void __get_state_machine_lock(struct port *port)
  */
 static inline void __release_state_machine_lock(struct port *port)
 {
-	spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+	spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -350,7 +350,7 @@ static u8 __get_duplex(struct port *port)
 static inline void __initialize_port_locks(struct slave *slave)
 {
 	/* make sure it isn't called twice */
-	spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
+	spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
 }
 
 /* Conversions */
@@ -688,8 +688,8 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 	struct slave *slave;
 
 	bond_for_each_slave_rcu(bond, slave, iter)
-		if (SLAVE_AD_INFO(slave).aggregator.is_active)
-			return &(SLAVE_AD_INFO(slave).aggregator);
+		if (SLAVE_AD_INFO(slave)->aggregator.is_active)
+			return &(SLAVE_AD_INFO(slave)->aggregator);
 
 	return NULL;
 }
@@ -1293,7 +1293,7 @@ static void ad_port_selection_logic(struct port *port)
 	}
 	/* search on all aggregators for a suitable aggregator for this port */
 	bond_for_each_slave(bond, slave, iter) {
-		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+		aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		/* keep a free aggregator for later use(if needed) */
 		if (!aggregator->lag_ports) {
@@ -1504,7 +1504,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 	best = (active && agg_device_up(active)) ? active : NULL;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		agg = &(SLAVE_AD_INFO(slave).aggregator);
+		agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		agg->is_active = 0;
 
@@ -1549,7 +1549,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 		 best->slave ? best->slave->dev->name : "NULL");
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		agg = &(SLAVE_AD_INFO(slave).aggregator);
+		agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
 			 agg->aggregator_identifier, agg->num_of_ports,
@@ -1840,16 +1840,16 @@ void bond_3ad_bind_slave(struct slave *slave)
 	struct aggregator *aggregator;
 
 	/* check that the slave has not been initialized yet. */
-	if (SLAVE_AD_INFO(slave).port.slave != slave) {
+	if (SLAVE_AD_INFO(slave)->port.slave != slave) {
 
 		/* port initialization */
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 
 		ad_initialize_port(port, bond->params.lacp_fast);
 
 		__initialize_port_locks(slave);
 		port->slave = slave;
-		port->actor_port_number = SLAVE_AD_INFO(slave).id;
+		port->actor_port_number = SLAVE_AD_INFO(slave)->id;
 		/* key is determined according to the link speed, duplex and user key(which
 		 * is yet not supported)
 		 */
@@ -1874,7 +1874,7 @@ void bond_3ad_bind_slave(struct slave *slave)
 		__disable_port(port);
 
 		/* aggregator initialization */
-		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+		aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
 		ad_initialize_agg(aggregator);
 
@@ -1903,8 +1903,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	struct slave *slave_iter;
 	struct list_head *iter;
 
-	aggregator = &(SLAVE_AD_INFO(slave).aggregator);
-	port = &(SLAVE_AD_INFO(slave).port);
+	aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -1932,7 +1932,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 		    (aggregator->lag_ports->next_port_in_aggregator)) {
 			/* find new aggregator for the related port(s) */
 			bond_for_each_slave(bond, slave_iter, iter) {
-				new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+				new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
 				/* if the new aggregator is empty, or it is
 				 * connected to our port only
 				 */
@@ -2010,7 +2010,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
 		/* find the aggregator that this port is connected to */
 		bond_for_each_slave(bond, slave_iter, iter) {
-			temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+			temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
 			prev_port = NULL;
 			/* search the port in the aggregator's related ports */
 			for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2076,7 +2076,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	if (BOND_AD_INFO(bond).agg_select_timer &&
 	    !(--BOND_AD_INFO(bond).agg_select_timer)) {
 		slave = bond_first_slave_rcu(bond);
-		port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+		port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
 
 		/* select the active aggregator for the bond */
 		if (port) {
@@ -2094,7 +2094,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 
 	/* for each port run the state machines */
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 		if (!port->slave) {
 			pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
 					    bond->dev->name);
@@ -2155,7 +2155,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
 
 	if (length >= sizeof(struct lacpdu)) {
 
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 
 		if (!port->slave) {
 			pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
@@ -2212,7 +2212,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 {
 	struct port *port;
 
-	port = &(SLAVE_AD_INFO(slave).port);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -2245,7 +2245,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 {
 	struct port *port;
 
-	port = &(SLAVE_AD_INFO(slave).port);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -2279,7 +2279,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 {
 	struct port *port;
 
-	port = &(SLAVE_AD_INFO(slave).port);
+	port = &(SLAVE_AD_INFO(slave)->port);
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
@@ -2347,7 +2347,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
 		ret = 0;
 		goto out;
 	}
-	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
+	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
 	if (active) {
 		/* are enough slaves available to consider link up? */
 		if (active->num_of_ports < bond->params.min_links) {
@@ -2384,7 +2384,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
 	struct port *port;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 		if (port->aggregator && port->aggregator->is_active) {
 			aggregator = port->aggregator;
 			break;
@@ -2440,22 +2440,22 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 		goto err_free;
 	}
 
-	slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+	slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
	first_ok_slave = NULL;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		agg = SLAVE_AD_INFO(slave).port.aggregator;
+		agg = SLAVE_AD_INFO(slave)->port.aggregator;
 		if (!agg || agg->aggregator_identifier != agg_id)
 			continue;
 
 		if (slave_agg_no >= 0) {
-			if (!first_ok_slave && SLAVE_IS_OK(slave))
+			if (!first_ok_slave && bond_slave_can_tx(slave))
 				first_ok_slave = slave;
 			slave_agg_no--;
 			continue;
 		}
 
-		if (SLAVE_IS_OK(slave)) {
+		if (bond_slave_can_tx(slave)) {
 			bond_dev_queue_xmit(bond, skb, slave->dev);
 			goto out;
 		}
@@ -2522,7 +2522,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
 
 	lacp_fast = bond->params.lacp_fast;
 	bond_for_each_slave(bond, slave, iter) {
-		port = &(SLAVE_AD_INFO(slave).port);
+		port = &(SLAVE_AD_INFO(slave)->port);
 		__get_state_machine_lock(port);
 		if (lacp_fast)
 			port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
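Two API shifts account for nearly every hunk above: SLAVE_AD_INFO() now evaluates to a pointer (hence the `.` to `->` conversions), and bond_xmit_hash() no longer takes a slave count, returning a raw hash that each caller reduces to its own range. A sketch of the latter caller-side pattern follows, with a stand-in FNV-1a hash; the kernel's real flow hash is not reproduced here.

    #include <stdint.h>

    /* Stand-in for bond_xmit_hash(): any 32-bit flow hash works here. */
    static uint32_t bond_xmit_hash_sketch(const uint8_t *hdr, int len)
    {
    	uint32_t h = 2166136261u;	/* FNV-1a over the header bytes */
    	while (len--)
    		h = (h ^ *hdr++) * 16777619u;
    	return h;
    }

    /* Caller-side reduction, as in bond_3ad_xmit_xor() above: the hash
     * is taken modulo the number of ports in this aggregator. */
    static int pick_slave(const uint8_t *hdr, int len, int slaves_in_agg)
    {
    	return bond_xmit_hash_sketch(hdr, len) % slaves_in_agg;
    }

Decoupling the hash from the port count lets one helper serve both the balance-xor and 802.3ad transmit paths, each applying its own modulo.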
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 93580a47cc54..76c0dade233f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -229,7 +229,7 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 
 	/* Find the slave with the largest gap */
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		if (SLAVE_IS_OK(slave)) {
+		if (bond_slave_can_tx(slave)) {
 			long long gap = compute_gap(slave);
 
 			if (max_gap < gap) {
@@ -384,7 +384,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
 	bool found = false;
 
 	bond_for_each_slave(bond, slave, iter) {
-		if (!SLAVE_IS_OK(slave))
+		if (!bond_slave_can_tx(slave))
 			continue;
 		if (!found) {
 			if (!before || before->speed < slave->speed)
@@ -417,7 +417,7 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
 	bool found = false;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		if (!SLAVE_IS_OK(slave))
+		if (!bond_slave_can_tx(slave))
 			continue;
 		if (!found) {
 			if (!before || before->speed < slave->speed)
@@ -755,7 +755,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 	/* Don't modify or load balance ARPs that do not originate locally
 	 * (e.g.,arrive via a bridge).
 	 */
-	if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+	if (!bond_slave_has_mac_rx(bond, arp->mac_src))
 		return NULL;
 
 	if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1039,11 +1039,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct net_device *upper;
 	struct list_head *iter;
+	struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
 
 	/* send untagged */
 	alb_send_lp_vid(slave, mac_addr, 0, 0);
 
-	/* loop through vlans and send one packet for each */
+	/* loop through all devices and see if we need to send a packet
+	 * for that device.
+	 */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
 		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
@@ -1059,6 +1062,16 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 						vlan_dev_vlan_id(upper));
 			}
 		}
+
+		/* If this is a macvlan device, then only send updates
+		 * when strict_match is turned off.
+		 */
+		if (netif_is_macvlan(upper) && !strict_match) {
+			memset(tags, 0, sizeof(tags));
+			bond_verify_device_path(bond->dev, upper, tags);
+			alb_send_lp_vid(slave, upper->dev_addr,
+					tags[0].vlan_proto, tags[0].vlan_id);
+		}
 	}
 	rcu_read_unlock();
 }
@@ -1068,7 +1081,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
 	struct net_device *dev = slave->dev;
 	struct sockaddr s_addr;
 
-	if (slave->bond->params.mode == BOND_MODE_TLB) {
+	if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
 		memcpy(dev->dev_addr, addr, dev->addr_len);
 		return 0;
 	}
@@ -1111,13 +1124,13 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
 static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 				struct slave *slave2)
 {
-	int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+	int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
 	struct slave *disabled_slave = NULL;
 
 	ASSERT_RTNL();
 
 	/* fasten the change in the switch */
-	if (SLAVE_IS_OK(slave1)) {
+	if (bond_slave_can_tx(slave1)) {
 		alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
@@ -1129,7 +1142,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 		disabled_slave = slave1;
 	}
 
-	if (SLAVE_IS_OK(slave2)) {
+	if (bond_slave_can_tx(slave2)) {
 		alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
@@ -1358,6 +1371,77 @@ void bond_alb_deinitialize(struct bonding *bond)
 		rlb_deinitialize(bond);
 }
 
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+			    struct slave *tx_slave)
+{
+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+	struct ethhdr *eth_data = eth_hdr(skb);
+
+	if (!tx_slave) {
+		/* unbalanced or unassigned, send through primary */
+		tx_slave = rcu_dereference(bond->curr_active_slave);
+		if (bond->params.tlb_dynamic_lb)
+			bond_info->unbalanced_load += skb->len;
+	}
+
+	if (tx_slave && bond_slave_can_tx(tx_slave)) {
+		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+			ether_addr_copy(eth_data->h_source,
+					tx_slave->dev->dev_addr);
+		}
+
+		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+		goto out;
+	}
+
+	if (tx_slave && bond->params.tlb_dynamic_lb) {
+		_lock_tx_hashtbl(bond);
+		__tlb_clear_slave(bond, tx_slave, 0);
+		_unlock_tx_hashtbl(bond);
+	}
+
+	/* no suitable interface, frame not sent */
+	dev_kfree_skb_any(skb);
+out:
+	return NETDEV_TX_OK;
+}
+
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct ethhdr *eth_data;
+	struct slave *tx_slave = NULL;
+	u32 hash_index;
+
+	skb_reset_mac_header(skb);
+	eth_data = eth_hdr(skb);
+
+	/* Do not TX balance any multicast or broadcast */
+	if (!is_multicast_ether_addr(eth_data->h_dest)) {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+		case htons(ETH_P_IPX):
+		    /* In case of IPX, it will falback to L2 hash */
+		case htons(ETH_P_IPV6):
+			hash_index = bond_xmit_hash(bond, skb);
+			if (bond->params.tlb_dynamic_lb) {
+				tx_slave = tlb_choose_channel(bond,
+							      hash_index & 0xFF,
+							      skb->len);
+			} else {
+				struct list_head *iter;
+				int idx = hash_index % bond->slave_cnt;
+
+				bond_for_each_slave_rcu(bond, tx_slave, iter)
+					if (--idx < 0)
+						break;
+			}
+			break;
+		}
+	}
+	return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -1366,7 +1450,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	struct slave *tx_slave = NULL;
 	static const __be32 ip_bcast = htonl(0xffffffff);
 	int hash_size = 0;
-	int do_tx_balance = 1;
+	bool do_tx_balance = true;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
 	struct ipv6hdr *ip6hdr;
@@ -1381,7 +1465,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
 		    (iph->daddr == ip_bcast) ||
 		    (iph->protocol == IPPROTO_IGMP)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 		hash_start = (char *)&(iph->daddr);
@@ -1393,7 +1477,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		 * that here just in case.
 		 */
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1401,7 +1485,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		 * broadcasts in IPv4.
 		 */
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1411,7 +1495,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		 */
 		ip6hdr = ipv6_hdr(skb);
 		if (ipv6_addr_any(&ip6hdr->saddr)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1421,7 +1505,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	case ETH_P_IPX:
 		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
 			/* something is wrong with this packet */
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1430,7 +1514,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			 * this family since it has an "ARP" like
 			 * mechanism
 			 */
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 
@@ -1438,12 +1522,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		hash_size = ETH_ALEN;
 		break;
 	case ETH_P_ARP:
-		do_tx_balance = 0;
+		do_tx_balance = false;
 		if (bond_info->rlb_enabled)
 			tx_slave = rlb_arp_xmit(skb, bond);
 		break;
 	default:
-		do_tx_balance = 0;
+		do_tx_balance = false;
 		break;
 	}
 
@@ -1452,32 +1536,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
 	}
 
-	if (!tx_slave) {
-		/* unbalanced or unassigned, send through primary */
-		tx_slave = rcu_dereference(bond->curr_active_slave);
-		bond_info->unbalanced_load += skb->len;
-	}
-
-	if (tx_slave && SLAVE_IS_OK(tx_slave)) {
-		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
-			ether_addr_copy(eth_data->h_source,
-					tx_slave->dev->dev_addr);
-		}
-
-		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
-		goto out;
-	}
-
-	if (tx_slave) {
-		_lock_tx_hashtbl(bond);
-		__tlb_clear_slave(bond, tx_slave, 0);
-		_unlock_tx_hashtbl(bond);
-	}
-
-	/* no suitable interface, frame not sent */
-	dev_kfree_skb_any(skb);
-out:
-	return NETDEV_TX_OK;
+	return bond_do_alb_xmit(skb, bond, tx_slave);
 }
 
 void bond_alb_monitor(struct work_struct *work)
@@ -1514,8 +1573,10 @@ void bond_alb_monitor(struct work_struct *work)
 			/* If updating current_active, use all currently
 			 * user mac addreses (!strict_match). Otherwise, only
 			 * use mac of the slave device.
+			 * In RLB mode, we always use strict matches.
 			 */
-			strict_match = (slave != bond->curr_active_slave);
+			strict_match = (slave != bond->curr_active_slave ||
+					bond_info->rlb_enabled);
 			alb_send_learning_packets(slave, slave->dev->dev_addr,
 						  strict_match);
 		}
@@ -1719,7 +1780,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	/* in TLB mode, the slave might flip down/up with the old dev_addr,
 	 * and thus filter bond->dev_addr's packets, so force bond's mac
 	 */
-	if (bond->params.mode == BOND_MODE_TLB) {
+	if (BOND_MODE(bond) == BOND_MODE_TLB) {
 		struct sockaddr sa;
 		u8 tmp_addr[ETH_ALEN];
 
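The TLB transmit split above in brief: bond_tlb_xmit() hashes the flow, and when tlb_dynamic_lb is off it simply walks the slave list to the (hash % slave_cnt)-th entry, so a given flow sticks to one slave without the dynamic rebalancing table. Below is a rough, self-contained sketch of just that selection step, as plain user-space C; fake_slave and pick_slave are invented stand-ins for the kernel's struct slave and the bond_for_each_slave_rcu() walk, not part of the patch.

#include <stdio.h>

struct fake_slave {
	const char *name;
	struct fake_slave *next;	/* simplified slave list */
};

static struct fake_slave *pick_slave(struct fake_slave *head,
				     unsigned int slave_cnt,
				     unsigned int hash)
{
	int idx = hash % slave_cnt;	/* same reduction the patch uses */
	struct fake_slave *s;

	for (s = head; s; s = s->next)	/* walk like bond_for_each_slave_rcu */
		if (--idx < 0)		/* break lands on the idx-th slave */
			break;
	return s;
}

int main(void)
{
	struct fake_slave s2 = { "eth2", NULL };
	struct fake_slave s1 = { "eth1", &s2 };
	struct fake_slave s0 = { "eth0", &s1 };

	/* flows with the same hash always map to the same slave */
	printf("%s\n", pick_slave(&s0, 3, 0x1234)->name);	/* eth1 */
	return 0;
}

The --idx < 0 walk matches the loop the hunk adds; anything unbalanceable (multicast, non-IP/IPX/IPv6) falls through with tx_slave NULL and bond_do_alb_xmit() sends it via the current active slave.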
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e09dd4bfafff..5fc76c01636c 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
 void bond_alb_monitor(struct work_struct *);
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 2d3f7fa541ff..658e761c4568 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -23,7 +23,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	if (bond->params.mode != BOND_MODE_ALB)
+	if (BOND_MODE(bond) != BOND_MODE_ALB)
 		return 0;
 
 	seq_printf(m, "SourceIP        DestinationIP "
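The one-line bond_debugfs.c change is the same refactor applied across this pull: direct bond->params.mode reads become the BOND_MODE() accessor, so the mode's storage can later move without touching every caller. A minimal sketch of the pattern under simplified, invented types (this is not the kernel's definition, just its shape):

#include <stdio.h>

struct bond_params { int mode; };
struct bonding { struct bond_params params; };

/* simplified stand-in for the kernel's BOND_MODE() macro */
#define BOND_MODE(bond) ((bond)->params.mode)
#define BOND_MODE_ALB 6

int main(void)
{
	struct bonding bond = { .params = { .mode = BOND_MODE_ALB } };

	if (BOND_MODE(&bond) == BOND_MODE_ALB)	/* accessor, not a raw field read */
		printf("ALB mode\n");
	return 0;
}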
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d3a67896d435..04f35f960cb8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -343,7 +343,7 @@ static int bond_set_carrier(struct bonding *bond)
 	if (!bond_has_slaves(bond))
 		goto down;
 
-	if (bond->params.mode == BOND_MODE_8023AD)
+	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		return bond_3ad_set_carrier(bond);
 
 	bond_for_each_slave(bond, slave, iter) {
@@ -497,7 +497,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
 	struct list_head *iter;
 	int err = 0;
 
-	if (USES_PRIMARY(bond->params.mode)) {
+	if (bond_uses_primary(bond)) {
 		/* write lock already acquired */
 		if (bond->curr_active_slave) {
 			err = dev_set_promiscuity(bond->curr_active_slave->dev,
@@ -523,7 +523,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
 	struct list_head *iter;
 	int err = 0;
 
-	if (USES_PRIMARY(bond->params.mode)) {
+	if (bond_uses_primary(bond)) {
 		/* write lock already acquired */
 		if (bond->curr_active_slave) {
 			err = dev_set_allmulti(bond->curr_active_slave->dev,
@@ -574,7 +574,7 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
 	dev_uc_unsync(slave_dev, bond_dev);
 	dev_mc_unsync(slave_dev, bond_dev);
 
-	if (bond->params.mode == BOND_MODE_8023AD) {
+	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 		/* del lacpdu mc addr from mc list */
 		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 
@@ -585,8 +585,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
 /*--------------------------- Active slave change ---------------------------*/
 
 /* Update the hardware address list and promisc/allmulti for the new and
- * old active slaves (if any). Modes that are !USES_PRIMARY keep all
- * slaves up date at all times; only the USES_PRIMARY modes need to call
+ * old active slaves (if any). Modes that are not using primary keep all
+ * slaves up date at all times; only the modes that use primary need to call
  * this function to swap these settings during a failover.
  */
 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
@@ -747,7 +747,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
 	bond_for_each_slave(bond, slave, iter) {
 		if (slave->link == BOND_LINK_UP)
 			return slave;
-		if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
 		    slave->delay < mintime) {
 			mintime = slave->delay;
 			bestslave = slave;
@@ -801,7 +801,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 		new_active->last_link_up = jiffies;
 
 		if (new_active->link == BOND_LINK_BACK) {
-			if (USES_PRIMARY(bond->params.mode)) {
+			if (bond_uses_primary(bond)) {
 				pr_info("%s: making interface %s the new active one %d ms earlier\n",
 					bond->dev->name, new_active->dev->name,
 					(bond->params.updelay - new_active->delay) * bond->params.miimon);
@@ -810,20 +810,20 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 			new_active->delay = 0;
 			new_active->link = BOND_LINK_UP;
 
-			if (bond->params.mode == BOND_MODE_8023AD)
+			if (BOND_MODE(bond) == BOND_MODE_8023AD)
 				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
 
 			if (bond_is_lb(bond))
 				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
 		} else {
-			if (USES_PRIMARY(bond->params.mode)) {
+			if (bond_uses_primary(bond)) {
 				pr_info("%s: making interface %s the new active one\n",
 					bond->dev->name, new_active->dev->name);
 			}
 		}
 	}
 
-	if (USES_PRIMARY(bond->params.mode))
+	if (bond_uses_primary(bond))
 		bond_hw_addr_swap(bond, new_active, old_active);
 
 	if (bond_is_lb(bond)) {
@@ -838,7 +838,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 		rcu_assign_pointer(bond->curr_active_slave, new_active);
 	}
 
-	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
 		if (old_active)
 			bond_set_slave_inactive_flags(old_active,
 						      BOND_SLAVE_NOTIFY_NOW);
@@ -876,8 +876,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 	 * resend only if bond is brought up with the affected
 	 * bonding modes and the retransmission is enabled */
 	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
-	    ((USES_PRIMARY(bond->params.mode) && new_active) ||
-	     bond->params.mode == BOND_MODE_ROUNDROBIN)) {
+	    ((bond_uses_primary(bond) && new_active) ||
+	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
 		bond->igmp_retrans = bond->params.resend_igmp;
 		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
 	}
@@ -958,7 +958,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
 	struct slave *slave;
 
 	bond_for_each_slave(bond, slave, iter)
-		if (IS_UP(slave->dev))
+		if (bond_slave_is_up(slave))
 			slave_disable_netpoll(slave);
 }
 
@@ -1038,6 +1038,7 @@ static void bond_compute_features(struct bonding *bond)
 
 	if (!bond_has_slaves(bond))
 		goto done;
+	vlan_features &= NETIF_F_ALL_FOR_ALL;
 
 	bond_for_each_slave(bond, slave, iter) {
 		vlan_features = netdev_increment_features(vlan_features,
@@ -1084,7 +1085,7 @@ static bool bond_should_deliver_exact_match(struct sk_buff *skb,
 					    struct bonding *bond)
 {
 	if (bond_is_slave_inactive(slave)) {
-		if (bond->params.mode == BOND_MODE_ALB &&
+		if (BOND_MODE(bond) == BOND_MODE_ALB &&
 		    skb->pkt_type != PACKET_BROADCAST &&
 		    skb->pkt_type != PACKET_MULTICAST)
 			return false;
@@ -1126,7 +1127,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 
 	skb->dev = bond->dev;
 
-	if (bond->params.mode == BOND_MODE_ALB &&
+	if (BOND_MODE(bond) == BOND_MODE_ALB &&
 	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
 	    skb->pkt_type == PACKET_HOST) {
 
@@ -1163,6 +1164,35 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
 	rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
 }
 
+static struct slave *bond_alloc_slave(struct bonding *bond)
+{
+	struct slave *slave = NULL;
+
+	slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+	if (!slave)
+		return NULL;
+
+	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
+					       GFP_KERNEL);
+		if (!SLAVE_AD_INFO(slave)) {
+			kfree(slave);
+			return NULL;
+		}
+	}
+	return slave;
+}
+
+static void bond_free_slave(struct slave *slave)
+{
+	struct bonding *bond = bond_get_bond_by_slave(slave);
+
+	if (BOND_MODE(bond) == BOND_MODE_8023AD)
+		kfree(SLAVE_AD_INFO(slave));
+
+	kfree(slave);
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1269,7 +1299,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		if (!bond_has_slaves(bond)) {
 			pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
 				bond_dev->name);
-			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
 				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
 				pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
 					bond_dev->name);
@@ -1290,11 +1320,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	    bond->dev->addr_assign_type == NET_ADDR_RANDOM)
 		bond_set_dev_addr(bond->dev, slave_dev);
 
-	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+	new_slave = bond_alloc_slave(bond);
 	if (!new_slave) {
 		res = -ENOMEM;
 		goto err_undo_flags;
 	}
+
+	new_slave->bond = bond;
+	new_slave->dev = slave_dev;
 	/*
 	 * Set the new_slave's queue_id to be zero. Queue ID mapping
 	 * is set via sysfs or module option if desired.
@@ -1317,7 +1350,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
 
 	if (!bond->params.fail_over_mac ||
-	    bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 		/*
 		 * Set slave to master's mac address. The application already
 		 * set the master's mac address to that of the first slave
@@ -1338,8 +1371,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		goto err_restore_mac;
 	}
 
-	new_slave->bond = bond;
-	new_slave->dev = slave_dev;
 	slave_dev->priv_flags |= IFF_BONDING;
 
 	if (bond_is_lb(bond)) {
@@ -1351,10 +1382,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 			goto err_close;
 	}
 
-	/* If the mode USES_PRIMARY, then the following is handled by
+	/* If the mode uses primary, then the following is handled by
 	 * bond_change_active_slave().
 	 */
-	if (!USES_PRIMARY(bond->params.mode)) {
+	if (!bond_uses_primary(bond)) {
 		/* set promiscuity level to new slave */
 		if (bond_dev->flags & IFF_PROMISC) {
 			res = dev_set_promiscuity(slave_dev, 1);
@@ -1377,7 +1408,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		netif_addr_unlock_bh(bond_dev);
 	}
 
-	if (bond->params.mode == BOND_MODE_8023AD) {
+	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 		/* add lacpdu mc addr to mc list */
 		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 
@@ -1450,7 +1481,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
 		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
-	if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+	if (bond_uses_primary(bond) && bond->params.primary[0]) {
 		/* if there is a primary slave, remember it */
 		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
 			bond->primary_slave = new_slave;
@@ -1458,7 +1489,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		}
 	}
 
-	switch (bond->params.mode) {
+	switch (BOND_MODE(bond)) {
 	case BOND_MODE_ACTIVEBACKUP:
 		bond_set_slave_inactive_flags(new_slave,
 					      BOND_SLAVE_NOTIFY_NOW);
@@ -1471,14 +1502,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
 		/* if this is the first slave */
 		if (!prev_slave) {
-			SLAVE_AD_INFO(new_slave).id = 1;
+			SLAVE_AD_INFO(new_slave)->id = 1;
 			/* Initialize AD with the number of times that the AD timer is called in 1 second
 			 * can be called only after the mac address of the bond is set
 			 */
 			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
 		} else {
-			SLAVE_AD_INFO(new_slave).id =
-				SLAVE_AD_INFO(prev_slave).id + 1;
+			SLAVE_AD_INFO(new_slave)->id =
+				SLAVE_AD_INFO(prev_slave)->id + 1;
 		}
 
 		bond_3ad_bind_slave(new_slave);
@@ -1539,7 +1570,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	bond_compute_features(bond);
 	bond_set_carrier(bond);
 
-	if (USES_PRIMARY(bond->params.mode)) {
+	if (bond_uses_primary(bond)) {
 		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
@@ -1563,7 +1594,7 @@ err_unregister:
 	netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
-	if (!USES_PRIMARY(bond->params.mode))
+	if (!bond_uses_primary(bond))
 		bond_hw_addr_flush(bond_dev, slave_dev);
 
 	vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1585,7 +1616,7 @@ err_close:
 
 err_restore_mac:
 	if (!bond->params.fail_over_mac ||
-	    bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 		/* XXX TODO - fom follow mode needs to change master's
 		 * MAC if this slave's MAC is in use by the bond, or at
 		 * least print a warning.
@@ -1599,7 +1630,7 @@ err_restore_mtu:
 	dev_set_mtu(slave_dev, new_slave->original_mtu);
 
 err_free:
-	kfree(new_slave);
+	bond_free_slave(new_slave);
 
 err_undo_flags:
 	/* Enslave of first slave has failed and we need to fix master's mac */
@@ -1661,7 +1692,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 	write_lock_bh(&bond->lock);
 
 	/* Inform AD package of unbinding of slave. */
-	if (bond->params.mode == BOND_MODE_8023AD)
+	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		bond_3ad_unbind_slave(slave);
 
 	write_unlock_bh(&bond->lock);
@@ -1676,7 +1707,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 		bond->current_arp_slave = NULL;
 
 	if (!all && (!bond->params.fail_over_mac ||
-		     bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
 		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
 		    bond_has_slaves(bond))
 			pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
@@ -1748,10 +1779,10 @@ static int __bond_release_one(struct net_device *bond_dev,
 	/* must do this from outside any spinlocks */
 	vlan_vids_del_by_dev(slave_dev, bond_dev);
 
-	/* If the mode USES_PRIMARY, then this cases was handled above by
+	/* If the mode uses primary, then this cases was handled above by
 	 * bond_change_active_slave(..., NULL)
 	 */
-	if (!USES_PRIMARY(bond->params.mode)) {
+	if (!bond_uses_primary(bond)) {
 		/* unset promiscuity level from slave
 		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
 		 * of the IFF_PROMISC flag in the bond_dev, but we need the
@@ -1775,7 +1806,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 	dev_close(slave_dev);
 
 	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
-	    bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 		/* restore original ("permanent") mac address */
 		ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
 		addr.sa_family = slave_dev->type;
@@ -1786,7 +1817,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 
 	slave_dev->priv_flags &= ~IFF_BONDING;
 
-	kfree(slave);
+	bond_free_slave(slave);
 
 	return 0; /* deletion OK */
 }
@@ -1821,7 +1852,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 
-	info->bond_mode = bond->params.mode;
+	info->bond_mode = BOND_MODE(bond);
 	info->miimon = bond->params.miimon;
 
 	info->num_slaves = bond->slave_cnt;
@@ -1877,7 +1908,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 			if (slave->delay) {
 				pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
 					bond->dev->name,
-					(bond->params.mode ==
+					(BOND_MODE(bond) ==
 					 BOND_MODE_ACTIVEBACKUP) ?
 					 (bond_is_active_slave(slave) ?
 					  "active " : "backup ") : "",
@@ -1968,10 +1999,10 @@ static void bond_miimon_commit(struct bonding *bond)
 			slave->link = BOND_LINK_UP;
 			slave->last_link_up = jiffies;
 
-			if (bond->params.mode == BOND_MODE_8023AD) {
+			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 				/* prevent it from being the active one */
 				bond_set_backup_slave(slave);
-			} else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 				/* make it immediately active */
 				bond_set_active_slave(slave);
 			} else if (slave != bond->primary_slave) {
@@ -1985,7 +2016,7 @@ static void bond_miimon_commit(struct bonding *bond)
 				slave->duplex ? "full" : "half");
 
 			/* notify ad that the link status has changed */
-			if (bond->params.mode == BOND_MODE_8023AD)
+			if (BOND_MODE(bond) == BOND_MODE_8023AD)
 				bond_3ad_handle_link_change(slave, BOND_LINK_UP);
 
 			if (bond_is_lb(bond))
@@ -2004,15 +2035,15 @@ static void bond_miimon_commit(struct bonding *bond)
 
 			slave->link = BOND_LINK_DOWN;
 
-			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
-			    bond->params.mode == BOND_MODE_8023AD)
+			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+			    BOND_MODE(bond) == BOND_MODE_8023AD)
 				bond_set_slave_inactive_flags(slave,
 							      BOND_SLAVE_NOTIFY_NOW);
 
 			pr_info("%s: link status definitely down for interface %s, disabling it\n",
 				bond->dev->name, slave->dev->name);
 
-			if (bond->params.mode == BOND_MODE_8023AD)
+			if (BOND_MODE(bond) == BOND_MODE_8023AD)
 				bond_3ad_handle_link_change(slave,
 							    BOND_LINK_DOWN);
 
@@ -2175,9 +2206,9 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
  * When the path is validated, collect any vlan information in the
  * path.
  */
-static bool bond_verify_device_path(struct net_device *start_dev,
-				    struct net_device *end_dev,
-				    struct bond_vlan_tag *tags)
+bool bond_verify_device_path(struct net_device *start_dev,
+			     struct net_device *end_dev,
+			     struct bond_vlan_tag *tags)
 {
 	struct net_device *upper;
 	struct list_head *iter;
@@ -2287,8 +2318,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
 	int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
 
 	if (!slave_do_arp_validate(bond, slave)) {
-		if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
-		    !slave_do_arp_validate_only(bond, slave))
+		if ((slave_do_arp_validate_only(bond) && is_arp) ||
+		    !slave_do_arp_validate_only(bond))
 			slave->last_rx = jiffies;
 		return RX_HANDLER_ANOTHER;
 	} else if (!is_arp) {
@@ -2456,7 +2487,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 		 * do - all replies will be rx'ed on same link causing slaves
 		 * to be unstable during low/no traffic periods
 		 */
-		if (IS_UP(slave->dev))
+		if (bond_slave_is_up(slave))
 			bond_arp_send_all(bond, slave);
 	}
 
@@ -2678,10 +2709,10 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		if (!found && !before && IS_UP(slave->dev))
+		if (!found && !before && bond_slave_is_up(slave))
 			before = slave;
 
-		if (found && !new_slave && IS_UP(slave->dev))
+		if (found && !new_slave && bond_slave_is_up(slave))
 			new_slave = slave;
 		/* if the link state is up at this point, we
 		 * mark it down - this can happen if we have
@@ -2690,7 +2721,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 		 * one the current slave so it is still marked
 		 * up when it is actually down
 		 */
-		if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
 			slave->link = BOND_LINK_DOWN;
 			if (slave->link_failure_count < UINT_MAX)
 				slave->link_failure_count++;
@@ -2853,7 +2884,7 @@ static int bond_slave_netdev_event(unsigned long event,
 
 		bond_update_speed_duplex(slave);
 
-		if (bond->params.mode == BOND_MODE_8023AD) {
+		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 			if (old_speed != slave->speed)
 				bond_3ad_adapter_speed_changed(slave);
 			if (old_duplex != slave->duplex)
@@ -2881,7 +2912,7 @@ static int bond_slave_netdev_event(unsigned long event,
 		break;
 	case NETDEV_CHANGENAME:
 		/* we don't care if we don't have primary set */
-		if (!USES_PRIMARY(bond->params.mode) ||
+		if (!bond_uses_primary(bond) ||
 		    !bond->params.primary[0])
 			break;
 
@@ -3011,20 +3042,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
  * bond_xmit_hash - generate a hash value based on the xmit policy
  * @bond: bonding device
  * @skb: buffer to use for headers
- * @count: modulo value
  *
  * This function will extract the necessary headers from the skb buffer and use
  * them to generate a hash based on the xmit_policy set in the bonding device
- * which will be reduced modulo count before returning.
  */
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
 {
 	struct flow_keys flow;
 	u32 hash;
 
 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
 	    !bond_flow_dissect(bond, skb, &flow))
-		return bond_eth_hash(skb) % count;
+		return bond_eth_hash(skb);
 
 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
 	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3035,7 +3064,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
 	hash ^= (hash >> 16);
 	hash ^= (hash >> 8);
 
-	return hash % count;
+	return hash;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
@@ -3046,7 +3075,7 @@ static void bond_work_init_all(struct bonding *bond)
3046 bond_resend_igmp_join_requests_delayed); 3075 bond_resend_igmp_join_requests_delayed);
3047 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 3076 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3048 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); 3077 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3049 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) 3078 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3050 INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); 3079 INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
3051 else 3080 else
3052 INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); 3081 INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
@@ -3073,7 +3102,7 @@ static int bond_open(struct net_device *bond_dev)
3073 if (bond_has_slaves(bond)) { 3102 if (bond_has_slaves(bond)) {
3074 read_lock(&bond->curr_slave_lock); 3103 read_lock(&bond->curr_slave_lock);
3075 bond_for_each_slave(bond, slave, iter) { 3104 bond_for_each_slave(bond, slave, iter) {
3076 if (USES_PRIMARY(bond->params.mode) 3105 if (bond_uses_primary(bond)
3077 && (slave != bond->curr_active_slave)) { 3106 && (slave != bond->curr_active_slave)) {
3078 bond_set_slave_inactive_flags(slave, 3107 bond_set_slave_inactive_flags(slave,
3079 BOND_SLAVE_NOTIFY_NOW); 3108 BOND_SLAVE_NOTIFY_NOW);
@@ -3092,9 +3121,10 @@ static int bond_open(struct net_device *bond_dev)
3092 /* bond_alb_initialize must be called before the timer 3121 /* bond_alb_initialize must be called before the timer
3093 * is started. 3122 * is started.
3094 */ 3123 */
3095 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) 3124 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3096 return -ENOMEM; 3125 return -ENOMEM;
3097 queue_delayed_work(bond->wq, &bond->alb_work, 0); 3126 if (bond->params.tlb_dynamic_lb)
3127 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3098 } 3128 }
3099 3129
3100 if (bond->params.miimon) /* link check interval, in milliseconds. */ 3130 if (bond->params.miimon) /* link check interval, in milliseconds. */
@@ -3105,7 +3135,7 @@ static int bond_open(struct net_device *bond_dev)
3105 bond->recv_probe = bond_arp_rcv; 3135 bond->recv_probe = bond_arp_rcv;
3106 } 3136 }
3107 3137
3108 if (bond->params.mode == BOND_MODE_8023AD) { 3138 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3109 queue_delayed_work(bond->wq, &bond->ad_work, 0); 3139 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3110 /* register to receive LACPDUs */ 3140 /* register to receive LACPDUs */
3111 bond->recv_probe = bond_3ad_lacpdu_recv; 3141 bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3310,7 +3340,7 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
3310 3340
3311 3341
3312 rcu_read_lock(); 3342 rcu_read_lock();
3313 if (USES_PRIMARY(bond->params.mode)) { 3343 if (bond_uses_primary(bond)) {
3314 slave = rcu_dereference(bond->curr_active_slave); 3344 slave = rcu_dereference(bond->curr_active_slave);
3315 if (slave) { 3345 if (slave) {
3316 dev_uc_sync(slave->dev, bond_dev); 3346 dev_uc_sync(slave->dev, bond_dev);
@@ -3464,7 +3494,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3464 struct list_head *iter; 3494 struct list_head *iter;
3465 int res = 0; 3495 int res = 0;
3466 3496
3467 if (bond->params.mode == BOND_MODE_ALB) 3497 if (BOND_MODE(bond) == BOND_MODE_ALB)
3468 return bond_alb_set_mac_address(bond_dev, addr); 3498 return bond_alb_set_mac_address(bond_dev, addr);
3469 3499
3470 3500
@@ -3475,7 +3505,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3475 * Returning an error causes ifenslave to fail. 3505 * Returning an error causes ifenslave to fail.
3476 */ 3506 */
3477 if (bond->params.fail_over_mac && 3507 if (bond->params.fail_over_mac &&
3478 bond->params.mode == BOND_MODE_ACTIVEBACKUP) 3508 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3479 return 0; 3509 return 0;
3480 3510
3481 if (!is_valid_ether_addr(sa->sa_data)) 3511 if (!is_valid_ether_addr(sa->sa_data))
@@ -3555,7 +3585,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
3555 /* Here we start from the slave with slave_id */ 3585 /* Here we start from the slave with slave_id */
3556 bond_for_each_slave_rcu(bond, slave, iter) { 3586 bond_for_each_slave_rcu(bond, slave, iter) {
3557 if (--i < 0) { 3587 if (--i < 0) {
3558 if (slave_can_tx(slave)) { 3588 if (bond_slave_can_tx(slave)) {
3559 bond_dev_queue_xmit(bond, skb, slave->dev); 3589 bond_dev_queue_xmit(bond, skb, slave->dev);
3560 return; 3590 return;
3561 } 3591 }
@@ -3567,7 +3597,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
3567 bond_for_each_slave_rcu(bond, slave, iter) { 3597 bond_for_each_slave_rcu(bond, slave, iter) {
3568 if (--i < 0) 3598 if (--i < 0)
3569 break; 3599 break;
3570 if (slave_can_tx(slave)) { 3600 if (bond_slave_can_tx(slave)) {
3571 bond_dev_queue_xmit(bond, skb, slave->dev); 3601 bond_dev_queue_xmit(bond, skb, slave->dev);
3572 return; 3602 return;
3573 } 3603 }
@@ -3624,7 +3654,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
3624 */ 3654 */
3625 if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { 3655 if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
3626 slave = rcu_dereference(bond->curr_active_slave); 3656 slave = rcu_dereference(bond->curr_active_slave);
3627 if (slave && slave_can_tx(slave)) 3657 if (slave && bond_slave_can_tx(slave))
3628 bond_dev_queue_xmit(bond, skb, slave->dev); 3658 bond_dev_queue_xmit(bond, skb, slave->dev);
3629 else 3659 else
3630 bond_xmit_slave_id(bond, skb, 0); 3660 bond_xmit_slave_id(bond, skb, 0);
@@ -3662,7 +3692,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
3662{ 3692{
3663 struct bonding *bond = netdev_priv(bond_dev); 3693 struct bonding *bond = netdev_priv(bond_dev);
3664 3694
3665 bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt)); 3695 bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
3666 3696
3667 return NETDEV_TX_OK; 3697 return NETDEV_TX_OK;
3668} 3698}
@@ -3677,7 +3707,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3677 bond_for_each_slave_rcu(bond, slave, iter) { 3707 bond_for_each_slave_rcu(bond, slave, iter) {
3678 if (bond_is_last_slave(bond, slave)) 3708 if (bond_is_last_slave(bond, slave))
3679 break; 3709 break;
3680 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { 3710 if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3681 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 3711 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3682 3712
3683 if (!skb2) { 3713 if (!skb2) {
@@ -3689,7 +3719,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3689 bond_dev_queue_xmit(bond, skb2, slave->dev); 3719 bond_dev_queue_xmit(bond, skb2, slave->dev);
3690 } 3720 }
3691 } 3721 }
3692 if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP) 3722 if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
3693 bond_dev_queue_xmit(bond, skb, slave->dev); 3723 bond_dev_queue_xmit(bond, skb, slave->dev);
3694 else 3724 else
3695 dev_kfree_skb_any(skb); 3725 dev_kfree_skb_any(skb);
@@ -3714,7 +3744,7 @@ static inline int bond_slave_override(struct bonding *bond,
3714 /* Find out if any slaves have the same mapping as this skb. */ 3744 /* Find out if any slaves have the same mapping as this skb. */
3715 bond_for_each_slave_rcu(bond, slave, iter) { 3745 bond_for_each_slave_rcu(bond, slave, iter) {
3716 if (slave->queue_id == skb->queue_mapping) { 3746 if (slave->queue_id == skb->queue_mapping) {
3717 if (slave_can_tx(slave)) { 3747 if (bond_slave_can_tx(slave)) {
3718 bond_dev_queue_xmit(bond, skb, slave->dev); 3748 bond_dev_queue_xmit(bond, skb, slave->dev);
3719 return 0; 3749 return 0;
3720 } 3750 }
@@ -3755,12 +3785,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
3755{ 3785{
3756 struct bonding *bond = netdev_priv(dev); 3786 struct bonding *bond = netdev_priv(dev);
3757 3787
3758 if (TX_QUEUE_OVERRIDE(bond->params.mode)) { 3788 if (bond_should_override_tx_queue(bond) &&
3759 if (!bond_slave_override(bond, skb)) 3789 !bond_slave_override(bond, skb))
3760 return NETDEV_TX_OK; 3790 return NETDEV_TX_OK;
3761 }
3762 3791
3763 switch (bond->params.mode) { 3792 switch (BOND_MODE(bond)) {
3764 case BOND_MODE_ROUNDROBIN: 3793 case BOND_MODE_ROUNDROBIN:
3765 return bond_xmit_roundrobin(skb, dev); 3794 return bond_xmit_roundrobin(skb, dev);
3766 case BOND_MODE_ACTIVEBACKUP: 3795 case BOND_MODE_ACTIVEBACKUP:
@@ -3772,12 +3801,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
3772 case BOND_MODE_8023AD: 3801 case BOND_MODE_8023AD:
3773 return bond_3ad_xmit_xor(skb, dev); 3802 return bond_3ad_xmit_xor(skb, dev);
3774 case BOND_MODE_ALB: 3803 case BOND_MODE_ALB:
3775 case BOND_MODE_TLB:
3776 return bond_alb_xmit(skb, dev); 3804 return bond_alb_xmit(skb, dev);
3805 case BOND_MODE_TLB:
3806 return bond_tlb_xmit(skb, dev);
3777 default: 3807 default:
3778 /* Should never happen, mode already checked */ 3808 /* Should never happen, mode already checked */
3779 pr_err("%s: Error: Unknown bonding mode %d\n", 3809 pr_err("%s: Error: Unknown bonding mode %d\n",
3780 dev->name, bond->params.mode); 3810 dev->name, BOND_MODE(bond));
3781 WARN_ON_ONCE(1); 3811 WARN_ON_ONCE(1);
3782 dev_kfree_skb_any(skb); 3812 dev_kfree_skb_any(skb);
3783 return NETDEV_TX_OK; 3813 return NETDEV_TX_OK;
@@ -3817,14 +3847,14 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
3817 ecmd->duplex = DUPLEX_UNKNOWN; 3847 ecmd->duplex = DUPLEX_UNKNOWN;
3818 ecmd->port = PORT_OTHER; 3848 ecmd->port = PORT_OTHER;
3819 3849
3820 /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we 3850 /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
3821 * do not need to check mode. Though link speed might not represent 3851 * do not need to check mode. Though link speed might not represent
3822 * the true receive or transmit bandwidth (not all modes are symmetric) 3852 * the true receive or transmit bandwidth (not all modes are symmetric)
3823 * this is an accurate maximum. 3853 * this is an accurate maximum.
3824 */ 3854 */
3825 read_lock(&bond->lock); 3855 read_lock(&bond->lock);
3826 bond_for_each_slave(bond, slave, iter) { 3856 bond_for_each_slave(bond, slave, iter) {
3827 if (SLAVE_IS_OK(slave)) { 3857 if (bond_slave_can_tx(slave)) {
3828 if (slave->speed != SPEED_UNKNOWN) 3858 if (slave->speed != SPEED_UNKNOWN)
3829 speed += slave->speed; 3859 speed += slave->speed;
3830 if (ecmd->duplex == DUPLEX_UNKNOWN && 3860 if (ecmd->duplex == DUPLEX_UNKNOWN &&
@@ -3915,7 +3945,7 @@ void bond_setup(struct net_device *bond_dev)
3915 /* Initialize the device options */ 3945 /* Initialize the device options */
3916 bond_dev->tx_queue_len = 0; 3946 bond_dev->tx_queue_len = 0;
3917 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; 3947 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
3918 bond_dev->priv_flags |= IFF_BONDING; 3948 bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
3919 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 3949 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
3920 3950
3921 /* At first, we block adding VLANs. That's the only way to 3951 /* At first, we block adding VLANs. That's the only way to
@@ -3994,7 +4024,8 @@ static int bond_check_params(struct bond_params *params)
3994 4024
3995 if (xmit_hash_policy) { 4025 if (xmit_hash_policy) {
3996 if ((bond_mode != BOND_MODE_XOR) && 4026 if ((bond_mode != BOND_MODE_XOR) &&
3997 (bond_mode != BOND_MODE_8023AD)) { 4027 (bond_mode != BOND_MODE_8023AD) &&
4028 (bond_mode != BOND_MODE_TLB)) {
3998 pr_info("xmit_hash_policy param is irrelevant in mode %s\n", 4029 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
3999 bond_mode_name(bond_mode)); 4030 bond_mode_name(bond_mode));
4000 } else { 4031 } else {
@@ -4079,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
4079 } 4110 }
4080 4111
4081 /* reset values for 802.3ad/TLB/ALB */ 4112 /* reset values for 802.3ad/TLB/ALB */
4082 if (BOND_NO_USES_ARP(bond_mode)) { 4113 if (!bond_mode_uses_arp(bond_mode)) {
4083 if (!miimon) { 4114 if (!miimon) {
4084 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); 4115 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4085 pr_warn("Forcing miimon to 100msec\n"); 4116 pr_warn("Forcing miimon to 100msec\n");
@@ -4161,7 +4192,7 @@ static int bond_check_params(struct bond_params *params)
4161 catch mistakes */ 4192 catch mistakes */
4162 __be32 ip; 4193 __be32 ip;
4163 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || 4194 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4164 IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) { 4195 !bond_is_ip_target_ok(ip)) {
4165 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 4196 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4166 arp_ip_target[i]); 4197 arp_ip_target[i]);
4167 arp_interval = 0; 4198 arp_interval = 0;
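
bond_is_ip_target_ok() inverts the polarity of the old IS_IP_TARGET_UNUSABLE_ADDRESS() macro (its removal is visible in the bonding.h hunk at the end of this diff): a target is acceptable when it is neither the limited broadcast address nor in the zero network. A runnable restatement of the check, using libc in place of the kernel's ipv4_is_*() helpers:

/* Userspace restatement of the ARP-target sanity check. */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ip_target_ok(uint32_t addr_be)
{
        uint32_t a = ntohl(addr_be);
        bool is_lbcast  = (a == 0xffffffff);          /* 255.255.255.255 */
        bool is_zeronet = ((a & 0xff000000) == 0);    /* 0.0.0.0/8 */

        return !is_lbcast && !is_zeronet;
}

int main(void)
{
        printf("%d %d %d\n",
               ip_target_ok(inet_addr("10.0.0.1")),          /* 1 */
               ip_target_ok(inet_addr("255.255.255.255")),   /* 0 */
               ip_target_ok(inet_addr("0.0.0.7")));          /* 0 */
        return 0;
}
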
@@ -4234,7 +4265,7 @@ static int bond_check_params(struct bond_params *params)
4234 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n"); 4265 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
4235 } 4266 }
4236 4267
4237 if (primary && !USES_PRIMARY(bond_mode)) { 4268 if (primary && !bond_mode_uses_primary(bond_mode)) {
4238 /* currently, using a primary only makes sense 4269 /* currently, using a primary only makes sense
4239 * in active backup, TLB or ALB modes 4270 * in active backup, TLB or ALB modes
4240 */ 4271 */
@@ -4300,6 +4331,7 @@ static int bond_check_params(struct bond_params *params)
4300 params->min_links = min_links; 4331 params->min_links = min_links;
4301 params->lp_interval = lp_interval; 4332 params->lp_interval = lp_interval;
4302 params->packets_per_slave = packets_per_slave; 4333 params->packets_per_slave = packets_per_slave;
4334 params->tlb_dynamic_lb = 1; /* Default value */
4303 if (packets_per_slave > 0) { 4335 if (packets_per_slave > 0) {
4304 params->reciprocal_packets_per_slave = 4336 params->reciprocal_packets_per_slave =
4305 reciprocal_value(packets_per_slave); 4337 reciprocal_value(packets_per_slave);
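
packets_per_slave keeps a balance-rr bond on one slave for N packets before rotating, so the transmit path needs a divide by N per packet; reciprocal_value() (linux/reciprocal_div.h) precomputes a multiply-and-shift recipe so the hot path avoids a hardware division. Below is a minimal userspace analogue of the idea, exact here for small dividends by construction; the kernel pair reciprocal_value()/reciprocal_divide() generalizes this to the full u32 range:

/* Userspace analogue of reciprocal division: a precomputed multiply-and-shift
 * replaces a runtime divide. Exact for 0 <= a < 2^20 with SHIFT = 40. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT 40

struct recip { uint64_t m; };

static struct recip recip_value(uint32_t d)
{
        struct recip r = { ((1ULL << SHIFT) + d - 1) / d }; /* ceil(2^40/d) */
        return r;
}

static uint32_t recip_divide(uint32_t a, struct recip r)
{
        return (uint32_t)(((uint64_t)a * r.m) >> SHIFT); /* no overflow: a < 2^20 */
}

int main(void)
{
        struct recip r = recip_value(3);  /* e.g. packets_per_slave = 3 */

        for (uint32_t a = 0; a < (1u << 20); a++)
                assert(recip_divide(a, r) == a / 3);
        puts("multiply+shift matches real division");
        return 0;
}
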
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index f847e165d252..5ab3c1847e67 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -56,10 +56,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
56 if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id)) 56 if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
57 goto nla_put_failure; 57 goto nla_put_failure;
58 58
59 if (slave->bond->params.mode == BOND_MODE_8023AD) { 59 if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
60 const struct aggregator *agg; 60 const struct aggregator *agg;
61 61
62 agg = SLAVE_AD_INFO(slave).port.aggregator; 62 agg = SLAVE_AD_INFO(slave)->port.aggregator;
63 if (agg) 63 if (agg)
64 if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, 64 if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
65 agg->aggregator_identifier)) 65 agg->aggregator_identifier))
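
The s/./->/  on SLAVE_AD_INFO() is not cosmetic: the bonding.h hunk at the bottom of this diff turns slave->ad_info from an embedded struct ad_slave_info (whose old comment read "HUGE - better to dynamically alloc") into a pointer, so the 802.3ad state can be allocated per slave on demand. A stub-typed sketch of the shape of that change:

/* Sketch of embedded-vs-allocated member; stub types, illustration only. */
#include <stdlib.h>

struct ad_slave_info { char state[512]; };     /* large in the real driver */

struct slave_old { struct ad_slave_info ad_info; };   /* always paid for */
struct slave_new { struct ad_slave_info *ad_info; };  /* paid on demand */

#define SLAVE_AD_INFO(s) ((s)->ad_info)   /* now yields a pointer, hence -> */

static int enslave(struct slave_new *s)
{
        s->ad_info = calloc(1, sizeof(*s->ad_info)); /* only in 802.3ad mode */
        return s->ad_info ? 0 : -1;
}

int main(void)
{
        struct slave_new s = { 0 };

        if (enslave(&s))
                return 1;
        SLAVE_AD_INFO(&s)->state[0] = 1;  /* '->' instead of '.' */
        free(s.ad_info);
        return 0;
}
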
@@ -407,7 +407,7 @@ static int bond_fill_info(struct sk_buff *skb,
407 unsigned int packets_per_slave; 407 unsigned int packets_per_slave;
408 int i, targets_added; 408 int i, targets_added;
409 409
410 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode)) 410 if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
411 goto nla_put_failure; 411 goto nla_put_failure;
412 412
413 if (slave_dev && 413 if (slave_dev &&
@@ -505,7 +505,7 @@ static int bond_fill_info(struct sk_buff *skb,
505 bond->params.ad_select)) 505 bond->params.ad_select))
506 goto nla_put_failure; 506 goto nla_put_failure;
507 507
508 if (bond->params.mode == BOND_MODE_8023AD) { 508 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
509 struct ad_info info; 509 struct ad_info info;
510 510
511 if (!bond_3ad_get_active_agg_info(bond, &info)) { 511 if (!bond_3ad_get_active_agg_info(bond, &info)) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 832070298446..540e0167bf24 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
70 const struct bond_opt_value *newval); 70 const struct bond_opt_value *newval);
71static int bond_option_slaves_set(struct bonding *bond, 71static int bond_option_slaves_set(struct bonding *bond,
72 const struct bond_opt_value *newval); 72 const struct bond_opt_value *newval);
73static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
74 const struct bond_opt_value *newval);
73 75
74 76
75static const struct bond_opt_value bond_mode_tbl[] = { 77static const struct bond_opt_value bond_mode_tbl[] = {
@@ -180,6 +182,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
180 { NULL, -1, 0}, 182 { NULL, -1, 0},
181}; 183};
182 184
185static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
186 { "off", 0, 0},
187 { "on", 1, BOND_VALFLAG_DEFAULT},
188 { NULL, -1, 0}
189};
190
183static const struct bond_option bond_opts[] = { 191static const struct bond_option bond_opts[] = {
184 [BOND_OPT_MODE] = { 192 [BOND_OPT_MODE] = {
185 .id = BOND_OPT_MODE, 193 .id = BOND_OPT_MODE,
@@ -200,7 +208,7 @@ static const struct bond_option bond_opts[] = {
200 [BOND_OPT_XMIT_HASH] = { 208 [BOND_OPT_XMIT_HASH] = {
201 .id = BOND_OPT_XMIT_HASH, 209 .id = BOND_OPT_XMIT_HASH,
202 .name = "xmit_hash_policy", 210 .name = "xmit_hash_policy",
203 .desc = "balance-xor and 802.3ad hashing method", 211 .desc = "balance-xor, 802.3ad, and tlb hashing method",
204 .values = bond_xmit_hashtype_tbl, 212 .values = bond_xmit_hashtype_tbl,
205 .set = bond_option_xmit_hash_policy_set 213 .set = bond_option_xmit_hash_policy_set
206 }, 214 },
@@ -365,9 +373,33 @@ static const struct bond_option bond_opts[] = {
365 .flags = BOND_OPTFLAG_RAWVAL, 373 .flags = BOND_OPTFLAG_RAWVAL,
366 .set = bond_option_slaves_set 374 .set = bond_option_slaves_set
367 }, 375 },
376 [BOND_OPT_TLB_DYNAMIC_LB] = {
377 .id = BOND_OPT_TLB_DYNAMIC_LB,
378 .name = "tlb_dynamic_lb",
379 .desc = "Enable dynamic flow shuffling",
380 .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
381 .values = bond_tlb_dynamic_lb_tbl,
382 .flags = BOND_OPTFLAG_IFDOWN,
383 .set = bond_option_tlb_dynamic_lb_set,
384 },
368 { } 385 { }
369}; 386};
370 387
388/* Searches for an option by name */
389const struct bond_option *bond_opt_get_by_name(const char *name)
390{
391 const struct bond_option *opt;
392 int option;
393
394 for (option = 0; option < BOND_OPT_LAST; option++) {
395 opt = bond_opt_get(option);
396 if (opt && !strcmp(opt->name, name))
397 return opt;
398 }
399
400 return NULL;
401}
402
371/* Searches for a value in opt's values[] table */ 403/* Searches for a value in opt's values[] table */
372const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val) 404const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
373{ 405{
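
bond_opt_get_by_name() is the keystone of the sysfs rewrite further down in this diff: each bonding attribute file is named exactly like its option, so one generic store handler can recover the option from attr->attr.name with this linear scan. A self-contained analogue of the lookup-by-name pattern:

/* Userspace analogue of option lookup by name; illustration only. */
#include <stdio.h>
#include <string.h>

struct option { int id; const char *name; };

static const struct option opts[] = {
        { 0, "mode" }, { 1, "miimon" }, { 2, "tlb_dynamic_lb" },
};

static const struct option *opt_get_by_name(const char *name)
{
        for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
                if (!strcmp(opts[i].name, name))
                        return &opts[i];
        return NULL;
}

int main(void)
{
        const struct option *o = opt_get_by_name("tlb_dynamic_lb");

        printf("%s -> id %d\n", o->name, o->id);  /* tlb_dynamic_lb -> id 2 */
        return 0;
}
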
@@ -641,7 +673,7 @@ const struct bond_option *bond_opt_get(unsigned int option)
641 673
642int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) 674int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
643{ 675{
644 if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) { 676 if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
645 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n", 677 pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
646 bond->dev->name, newval->string); 678 bond->dev->name, newval->string);
647 /* disable arp monitoring */ 679 /* disable arp monitoring */
@@ -662,7 +694,7 @@ int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newv
662static struct net_device *__bond_option_active_slave_get(struct bonding *bond, 694static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
663 struct slave *slave) 695 struct slave *slave)
664{ 696{
665 return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL; 697 return bond_uses_primary(bond) && slave ? slave->dev : NULL;
666} 698}
667 699
668struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond) 700struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
@@ -727,7 +759,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
727 bond->dev->name, new_active->dev->name); 759 bond->dev->name, new_active->dev->name);
728 } else { 760 } else {
729 if (old_active && (new_active->link == BOND_LINK_UP) && 761 if (old_active && (new_active->link == BOND_LINK_UP) &&
730 IS_UP(new_active->dev)) { 762 bond_slave_is_up(new_active)) {
731 pr_info("%s: Setting %s as active slave\n", 763 pr_info("%s: Setting %s as active slave\n",
732 bond->dev->name, new_active->dev->name); 764 bond->dev->name, new_active->dev->name);
733 bond_change_active_slave(bond, new_active); 765 bond_change_active_slave(bond, new_active);
@@ -746,6 +778,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
746 return ret; 778 return ret;
747} 779}
748 780
781/* There are two tricky bits here. First, if MII monitoring is activated, then
782 * we must disable ARP monitoring. Second, if the timer isn't running, we must
783 * start it.
784 */
749static int bond_option_miimon_set(struct bonding *bond, 785static int bond_option_miimon_set(struct bonding *bond,
750 const struct bond_opt_value *newval) 786 const struct bond_opt_value *newval)
751{ 787{
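
The comment pulled in above states an invariant the driver maintains everywhere: miimon and arp_interval are mutually exclusive link monitors, so enabling one zeroes the other and (re)arms the matching work timer. A toy model of just the invariant:

/* Toy model of the miimon/arp_interval exclusivity; not kernel code. */
#include <assert.h>
#include <stdio.h>

struct params { int miimon; int arp_interval; };

static void set_miimon(struct params *p, int v)
{
        p->miimon = v;
        if (v)
                p->arp_interval = 0;    /* ARP monitor switched off */
        /* the real driver also queues mii_work here if it was idle */
}

static void set_arp_interval(struct params *p, int v)
{
        p->arp_interval = v;
        if (v)
                p->miimon = 0;          /* MII monitor switched off */
}

int main(void)
{
        struct params p = { 0, 0 };

        set_arp_interval(&p, 1000);
        set_miimon(&p, 100);
        assert(p.miimon == 100 && p.arp_interval == 0);
        puts("at most one monitor active");
        return 0;
}
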
@@ -784,6 +820,10 @@ static int bond_option_miimon_set(struct bonding *bond,
784 return 0; 820 return 0;
785} 821}
786 822
823/* Set up and down delays. These must be multiples of the
824 * MII monitoring value, and are stored internally as the multiplier.
825 * Thus, we must translate to MS for the real world.
826 */
787static int bond_option_updelay_set(struct bonding *bond, 827static int bond_option_updelay_set(struct bonding *bond,
788 const struct bond_opt_value *newval) 828 const struct bond_opt_value *newval)
789{ 829{
@@ -842,6 +882,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
842 return 0; 882 return 0;
843} 883}
844 884
885/* There are two tricky bits here. First, if ARP monitoring is activated, then
886 * we must disable MII monitoring. Second, if the ARP timer isn't running,
887 * we must start it.
888 */
845static int bond_option_arp_interval_set(struct bonding *bond, 889static int bond_option_arp_interval_set(struct bonding *bond,
846 const struct bond_opt_value *newval) 890 const struct bond_opt_value *newval)
847{ 891{
@@ -899,7 +943,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
899 __be32 *targets = bond->params.arp_targets; 943 __be32 *targets = bond->params.arp_targets;
900 int ind; 944 int ind;
901 945
902 if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) { 946 if (!bond_is_ip_target_ok(target)) {
903 pr_err("%s: invalid ARP target %pI4 specified for addition\n", 947 pr_err("%s: invalid ARP target %pI4 specified for addition\n",
904 bond->dev->name, &target); 948 bond->dev->name, &target);
905 return -EINVAL; 949 return -EINVAL;
@@ -944,7 +988,7 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
944 unsigned long *targets_rx; 988 unsigned long *targets_rx;
945 int ind, i; 989 int ind, i;
946 990
947 if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) { 991 if (!bond_is_ip_target_ok(target)) {
948 pr_err("%s: invalid ARP target %pI4 specified for removal\n", 992 pr_err("%s: invalid ARP target %pI4 specified for removal\n",
949 bond->dev->name, &target); 993 bond->dev->name, &target);
950 return -EINVAL; 994 return -EINVAL;
@@ -1338,3 +1382,13 @@ err_no_cmd:
1338 ret = -EPERM; 1382 ret = -EPERM;
1339 goto out; 1383 goto out;
1340} 1384}
1385
1386static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
1387 const struct bond_opt_value *newval)
1388{
1389 pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
1390 bond->dev->name, newval->string, newval->value);
1391 bond->params.tlb_dynamic_lb = newval->value;
1392
1393 return 0;
1394}
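
Together with the table entry above, this gives balance-tlb a switch between its historical behavior (dynamic flow shuffling, "on", the default) and plain hash-based spreading ("off"). BOND_OPTFLAG_IFDOWN means the write is refused while the bond is up, and unsuppmodes rejects it outside balance-tlb. From userspace the knob is an ordinary sysfs file; a minimal sketch of flipping it, where the bond0 name and the device being down and in balance-tlb mode are assumptions about a concrete setup:

/* Userspace sketch: disable dynamic shuffling on an (assumed) bond0 that is
 * in balance-tlb mode and administratively down. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/net/bond0/bonding/tlb_dynamic_lb", "w");

        if (!f) {
                perror("tlb_dynamic_lb");
                return 1;
        }
        fputs("off\n", f);      /* "on" re-enables flow shuffling */
        return fclose(f) == 0 ? 0 : 1;
}
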
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 12be9e1bfb0c..17ded5b29176 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -62,6 +62,7 @@ enum {
62 BOND_OPT_RESEND_IGMP, 62 BOND_OPT_RESEND_IGMP,
63 BOND_OPT_LP_INTERVAL, 63 BOND_OPT_LP_INTERVAL,
64 BOND_OPT_SLAVES, 64 BOND_OPT_SLAVES,
65 BOND_OPT_TLB_DYNAMIC_LB,
65 BOND_OPT_LAST 66 BOND_OPT_LAST
66}; 67};
67 68
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
104const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, 105const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
105 struct bond_opt_value *val); 106 struct bond_opt_value *val);
106const struct bond_option *bond_opt_get(unsigned int option); 107const struct bond_option *bond_opt_get(unsigned int option);
108const struct bond_option *bond_opt_get_by_name(const char *name);
107const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val); 109const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
108 110
109/* This helper is used to initialize a bond_opt_value structure for parameter 111/* This helper is used to initialize a bond_opt_value structure for parameter
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 013fdd0f45e9..b215b479bb3a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -72,9 +72,9 @@ static void bond_info_show_master(struct seq_file *seq)
72 curr = rcu_dereference(bond->curr_active_slave); 72 curr = rcu_dereference(bond->curr_active_slave);
73 73
74 seq_printf(seq, "Bonding Mode: %s", 74 seq_printf(seq, "Bonding Mode: %s",
75 bond_mode_name(bond->params.mode)); 75 bond_mode_name(BOND_MODE(bond)));
76 76
77 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && 77 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
78 bond->params.fail_over_mac) { 78 bond->params.fail_over_mac) {
79 optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC, 79 optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
80 bond->params.fail_over_mac); 80 bond->params.fail_over_mac);
@@ -83,15 +83,15 @@ static void bond_info_show_master(struct seq_file *seq)
83 83
84 seq_printf(seq, "\n"); 84 seq_printf(seq, "\n");
85 85
86 if (bond->params.mode == BOND_MODE_XOR || 86 if (BOND_MODE(bond) == BOND_MODE_XOR ||
87 bond->params.mode == BOND_MODE_8023AD) { 87 BOND_MODE(bond) == BOND_MODE_8023AD) {
88 optval = bond_opt_get_val(BOND_OPT_XMIT_HASH, 88 optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
89 bond->params.xmit_policy); 89 bond->params.xmit_policy);
90 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", 90 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
91 optval->string, bond->params.xmit_policy); 91 optval->string, bond->params.xmit_policy);
92 } 92 }
93 93
94 if (USES_PRIMARY(bond->params.mode)) { 94 if (bond_uses_primary(bond)) {
95 seq_printf(seq, "Primary Slave: %s", 95 seq_printf(seq, "Primary Slave: %s",
96 (bond->primary_slave) ? 96 (bond->primary_slave) ?
97 bond->primary_slave->dev->name : "None"); 97 bond->primary_slave->dev->name : "None");
@@ -134,7 +134,7 @@ static void bond_info_show_master(struct seq_file *seq)
134 seq_printf(seq, "\n"); 134 seq_printf(seq, "\n");
135 } 135 }
136 136
137 if (bond->params.mode == BOND_MODE_8023AD) { 137 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
138 struct ad_info ad_info; 138 struct ad_info ad_info;
139 139
140 seq_puts(seq, "\n802.3ad info\n"); 140 seq_puts(seq, "\n802.3ad info\n");
@@ -188,9 +188,9 @@ static void bond_info_show_slave(struct seq_file *seq,
188 188
189 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); 189 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
190 190
191 if (bond->params.mode == BOND_MODE_8023AD) { 191 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
192 const struct aggregator *agg 192 const struct aggregator *agg
193 = SLAVE_AD_INFO(slave).port.aggregator; 193 = SLAVE_AD_INFO(slave)->port.aggregator;
194 194
195 if (agg) 195 if (agg)
196 seq_printf(seq, "Aggregator ID: %d\n", 196 seq_printf(seq, "Aggregator ID: %d\n",
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5f6babcfc26e..daed52f68ce1 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -45,8 +45,7 @@
45#define to_dev(obj) container_of(obj, struct device, kobj) 45#define to_dev(obj) container_of(obj, struct device, kobj)
46#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) 46#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
47 47
48/* 48/* "show" function for the bond_masters attribute.
49 * "show" function for the bond_masters attribute.
50 * The class parameter is ignored. 49 * The class parameter is ignored.
51 */ 50 */
52static ssize_t bonding_show_bonds(struct class *cls, 51static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
88 return NULL; 87 return NULL;
89} 88}
90 89
91/* 90/* "store" function for the bond_masters attribute. This is what
92 * "store" function for the bond_masters attribute. This is what
93 * creates and deletes entire bonds. 91 * creates and deletes entire bonds.
94 * 92 *
95 * The class parameter is ignored. 93 * The class parameter is ignored.
96 * 94 *
97 */ 95 */
98
99static ssize_t bonding_store_bonds(struct class *cls, 96static ssize_t bonding_store_bonds(struct class *cls,
100 struct class_attribute *attr, 97 struct class_attribute *attr,
101 const char *buffer, size_t count) 98 const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
158 .store = bonding_store_bonds, 155 .store = bonding_store_bonds,
159}; 156};
160 157
161/* 158/* Generic "store" method for bonding sysfs option setting */
162 * Show the slaves in the current bond. 159static ssize_t bonding_sysfs_store_option(struct device *d,
163 */ 160 struct device_attribute *attr,
161 const char *buffer, size_t count)
162{
163 struct bonding *bond = to_bond(d);
164 const struct bond_option *opt;
165 int ret;
166
167 opt = bond_opt_get_by_name(attr->attr.name);
168 if (WARN_ON(!opt))
169 return -ENOENT;
170 ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
171 if (!ret)
172 ret = count;
173
174 return ret;
175}
176
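
This single handler replaces the roughly twenty copy-pasted store functions deleted below: every writable DEVICE_ATTR now passes bonding_sysfs_store_option as its store method, and the handler recovers which option to set from the attribute's own name via bond_opt_get_by_name(). A compact userspace analogue of that name-driven dispatch, with invented names:

/* Userspace analogue of name-driven generic store dispatch. */
#include <stdio.h>
#include <string.h>

struct attr { const char *name; };

static int set_miimon(const char *v) { return printf("miimon=%s\n", v), 0; }
static int set_mode(const char *v)   { return printf("mode=%s\n", v), 0; }

static const struct { const char *name; int (*set)(const char *); } table[] = {
        { "miimon", set_miimon },
        { "mode",   set_mode },
};

/* One store routine for every attribute: dispatch on attr->name. */
static int generic_store(const struct attr *attr, const char *buf)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (!strcmp(table[i].name, attr->name))
                        return table[i].set(buf);
        return -1;      /* mirrors the WARN_ON(!opt) -> -ENOENT path */
}

int main(void)
{
        struct attr miimon = { "miimon" };

        return generic_store(&miimon, "100");
}
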
177/* Show the slaves in the current bond. */
164static ssize_t bonding_show_slaves(struct device *d, 178static ssize_t bonding_show_slaves(struct device *d,
165 struct device_attribute *attr, char *buf) 179 struct device_attribute *attr, char *buf)
166{ 180{
@@ -190,62 +204,24 @@ static ssize_t bonding_show_slaves(struct device *d,
190 204
191 return res; 205 return res;
192} 206}
193
194/*
195 * Set the slaves in the current bond.
196 * This is supposed to be only thin wrapper for bond_enslave and bond_release.
197 * All hard work should be done there.
198 */
199static ssize_t bonding_store_slaves(struct device *d,
200 struct device_attribute *attr,
201 const char *buffer, size_t count)
202{
203 struct bonding *bond = to_bond(d);
204 int ret;
205
206 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
207 if (!ret)
208 ret = count;
209
210 return ret;
211}
212static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, 207static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
213 bonding_store_slaves); 208 bonding_sysfs_store_option);
214 209
215/* 210/* Show the bonding mode. */
216 * Show and set the bonding mode. The bond interface must be down to
217 * change the mode.
218 */
219static ssize_t bonding_show_mode(struct device *d, 211static ssize_t bonding_show_mode(struct device *d,
220 struct device_attribute *attr, char *buf) 212 struct device_attribute *attr, char *buf)
221{ 213{
222 struct bonding *bond = to_bond(d); 214 struct bonding *bond = to_bond(d);
223 const struct bond_opt_value *val; 215 const struct bond_opt_value *val;
224 216
225 val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode); 217 val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
226 218
227 return sprintf(buf, "%s %d\n", val->string, bond->params.mode); 219 return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
228}
229
230static ssize_t bonding_store_mode(struct device *d,
231 struct device_attribute *attr,
232 const char *buf, size_t count)
233{
234 struct bonding *bond = to_bond(d);
235 int ret;
236
237 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
238 if (!ret)
239 ret = count;
240
241 return ret;
242} 220}
243static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, 221static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
244 bonding_show_mode, bonding_store_mode); 222 bonding_show_mode, bonding_sysfs_store_option);
245 223
246/* 224/* Show the bonding transmit hash method. */
247 * Show and set the bonding transmit hash method.
248 */
249static ssize_t bonding_show_xmit_hash(struct device *d, 225static ssize_t bonding_show_xmit_hash(struct device *d,
250 struct device_attribute *attr, 226 struct device_attribute *attr,
251 char *buf) 227 char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
257 233
258 return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy); 234 return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
259} 235}
260
261static ssize_t bonding_store_xmit_hash(struct device *d,
262 struct device_attribute *attr,
263 const char *buf, size_t count)
264{
265 struct bonding *bond = to_bond(d);
266 int ret;
267
268 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
269 if (!ret)
270 ret = count;
271
272 return ret;
273}
274static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, 236static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
275 bonding_show_xmit_hash, bonding_store_xmit_hash); 237 bonding_show_xmit_hash, bonding_sysfs_store_option);
276 238
277/* 239/* Show arp_validate. */
278 * Show and set arp_validate.
279 */
280static ssize_t bonding_show_arp_validate(struct device *d, 240static ssize_t bonding_show_arp_validate(struct device *d,
281 struct device_attribute *attr, 241 struct device_attribute *attr,
282 char *buf) 242 char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
289 249
290 return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate); 250 return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
291} 251}
292
293static ssize_t bonding_store_arp_validate(struct device *d,
294 struct device_attribute *attr,
295 const char *buf, size_t count)
296{
297 struct bonding *bond = to_bond(d);
298 int ret;
299
300 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
301 if (!ret)
302 ret = count;
303
304 return ret;
305}
306
307static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, 252static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
308 bonding_store_arp_validate); 253 bonding_sysfs_store_option);
309/* 254
310 * Show and set arp_all_targets. 255/* Show arp_all_targets. */
311 */
312static ssize_t bonding_show_arp_all_targets(struct device *d, 256static ssize_t bonding_show_arp_all_targets(struct device *d,
313 struct device_attribute *attr, 257 struct device_attribute *attr,
314 char *buf) 258 char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
321 return sprintf(buf, "%s %d\n", 265 return sprintf(buf, "%s %d\n",
322 val->string, bond->params.arp_all_targets); 266 val->string, bond->params.arp_all_targets);
323} 267}
324
325static ssize_t bonding_store_arp_all_targets(struct device *d,
326 struct device_attribute *attr,
327 const char *buf, size_t count)
328{
329 struct bonding *bond = to_bond(d);
330 int ret;
331
332 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
333 if (!ret)
334 ret = count;
335
336 return ret;
337}
338
339static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR, 268static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
340 bonding_show_arp_all_targets, bonding_store_arp_all_targets); 269 bonding_show_arp_all_targets, bonding_sysfs_store_option);
341 270
342/* 271/* Show fail_over_mac. */
343 * Show and store fail_over_mac. User only allowed to change the
344 * value when there are no slaves.
345 */
346static ssize_t bonding_show_fail_over_mac(struct device *d, 272static ssize_t bonding_show_fail_over_mac(struct device *d,
347 struct device_attribute *attr, 273 struct device_attribute *attr,
348 char *buf) 274 char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
355 281
356 return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac); 282 return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
357} 283}
358
359static ssize_t bonding_store_fail_over_mac(struct device *d,
360 struct device_attribute *attr,
361 const char *buf, size_t count)
362{
363 struct bonding *bond = to_bond(d);
364 int ret;
365
366 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
367 if (!ret)
368 ret = count;
369
370 return ret;
371}
372
373static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, 284static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
374 bonding_show_fail_over_mac, bonding_store_fail_over_mac); 285 bonding_show_fail_over_mac, bonding_sysfs_store_option);
375 286
376/* 287/* Show the arp timer interval. */
377 * Show and set the arp timer interval. There are two tricky bits
378 * here. First, if ARP monitoring is activated, then we must disable
379 * MII monitoring. Second, if the ARP timer isn't running, we must
380 * start it.
381 */
382static ssize_t bonding_show_arp_interval(struct device *d, 288static ssize_t bonding_show_arp_interval(struct device *d,
383 struct device_attribute *attr, 289 struct device_attribute *attr,
384 char *buf) 290 char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
387 293
388 return sprintf(buf, "%d\n", bond->params.arp_interval); 294 return sprintf(buf, "%d\n", bond->params.arp_interval);
389} 295}
390
391static ssize_t bonding_store_arp_interval(struct device *d,
392 struct device_attribute *attr,
393 const char *buf, size_t count)
394{
395 struct bonding *bond = to_bond(d);
396 int ret;
397
398 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
399 if (!ret)
400 ret = count;
401
402 return ret;
403}
404static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR, 296static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
405 bonding_show_arp_interval, bonding_store_arp_interval); 297 bonding_show_arp_interval, bonding_sysfs_store_option);
406 298
407/* 299/* Show the arp targets. */
408 * Show and set the arp targets.
409 */
410static ssize_t bonding_show_arp_targets(struct device *d, 300static ssize_t bonding_show_arp_targets(struct device *d,
411 struct device_attribute *attr, 301 struct device_attribute *attr,
412 char *buf) 302 char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
424 314
425 return res; 315 return res;
426} 316}
317static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
318 bonding_show_arp_targets, bonding_sysfs_store_option);
427 319
428static ssize_t bonding_store_arp_targets(struct device *d, 320/* Show the up and down delays. */
429 struct device_attribute *attr,
430 const char *buf, size_t count)
431{
432 struct bonding *bond = to_bond(d);
433 int ret;
434
435 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
436 if (!ret)
437 ret = count;
438
439 return ret;
440}
441static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
442
443/*
444 * Show and set the up and down delays. These must be multiples of the
445 * MII monitoring value, and are stored internally as the multiplier.
446 * Thus, we must translate to MS for the real world.
447 */
448static ssize_t bonding_show_downdelay(struct device *d, 321static ssize_t bonding_show_downdelay(struct device *d,
449 struct device_attribute *attr, 322 struct device_attribute *attr,
450 char *buf) 323 char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
453 326
454 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon); 327 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
455} 328}
456
457static ssize_t bonding_store_downdelay(struct device *d,
458 struct device_attribute *attr,
459 const char *buf, size_t count)
460{
461 struct bonding *bond = to_bond(d);
462 int ret;
463
464 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
465 if (!ret)
466 ret = count;
467
468 return ret;
469}
470static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, 329static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
471 bonding_show_downdelay, bonding_store_downdelay); 330 bonding_show_downdelay, bonding_sysfs_store_option);
472 331
473static ssize_t bonding_show_updelay(struct device *d, 332static ssize_t bonding_show_updelay(struct device *d,
474 struct device_attribute *attr, 333 struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
479 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon); 338 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
480 339
481} 340}
482
483static ssize_t bonding_store_updelay(struct device *d,
484 struct device_attribute *attr,
485 const char *buf, size_t count)
486{
487 struct bonding *bond = to_bond(d);
488 int ret;
489
490 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
491 if (!ret)
492 ret = count;
493
494 return ret;
495}
496static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, 341static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
497 bonding_show_updelay, bonding_store_updelay); 342 bonding_show_updelay, bonding_sysfs_store_option);
498 343
499/* 344/* Show the LACP interval. */
500 * Show and set the LACP interval. Interface must be down, and the mode
501 * must be set to 802.3ad mode.
502 */
503static ssize_t bonding_show_lacp(struct device *d, 345static ssize_t bonding_show_lacp(struct device *d,
504 struct device_attribute *attr, 346 struct device_attribute *attr,
505 char *buf) 347 char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
511 353
512 return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast); 354 return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
513} 355}
514
515static ssize_t bonding_store_lacp(struct device *d,
516 struct device_attribute *attr,
517 const char *buf, size_t count)
518{
519 struct bonding *bond = to_bond(d);
520 int ret;
521
522 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
523 if (!ret)
524 ret = count;
525
526 return ret;
527}
528static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, 356static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
529 bonding_show_lacp, bonding_store_lacp); 357 bonding_show_lacp, bonding_sysfs_store_option);
530 358
531static ssize_t bonding_show_min_links(struct device *d, 359static ssize_t bonding_show_min_links(struct device *d,
532 struct device_attribute *attr, 360 struct device_attribute *attr,
@@ -536,22 +364,8 @@ static ssize_t bonding_show_min_links(struct device *d,
536 364
537 return sprintf(buf, "%u\n", bond->params.min_links); 365 return sprintf(buf, "%u\n", bond->params.min_links);
538} 366}
539
540static ssize_t bonding_store_min_links(struct device *d,
541 struct device_attribute *attr,
542 const char *buf, size_t count)
543{
544 struct bonding *bond = to_bond(d);
545 int ret;
546
547 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
548 if (!ret)
549 ret = count;
550
551 return ret;
552}
553static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR, 367static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
554 bonding_show_min_links, bonding_store_min_links); 368 bonding_show_min_links, bonding_sysfs_store_option);
555 369
556static ssize_t bonding_show_ad_select(struct device *d, 370static ssize_t bonding_show_ad_select(struct device *d,
557 struct device_attribute *attr, 371 struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
564 378
565 return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select); 379 return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
566} 380}
567
568
569static ssize_t bonding_store_ad_select(struct device *d,
570 struct device_attribute *attr,
571 const char *buf, size_t count)
572{
573 struct bonding *bond = to_bond(d);
574 int ret;
575
576 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
577 if (!ret)
578 ret = count;
579
580 return ret;
581}
582static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, 381static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
583 bonding_show_ad_select, bonding_store_ad_select); 382 bonding_show_ad_select, bonding_sysfs_store_option);
584 383
585/* 384/* Show and set the number of peer notifications to send after a failover event. */
586 * Show and set the number of peer notifications to send after a failover event.
587 */
588static ssize_t bonding_show_num_peer_notif(struct device *d, 385static ssize_t bonding_show_num_peer_notif(struct device *d,
589 struct device_attribute *attr, 386 struct device_attribute *attr,
590 char *buf) 387 char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
611static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, 408static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
612 bonding_show_num_peer_notif, bonding_store_num_peer_notif); 409 bonding_show_num_peer_notif, bonding_store_num_peer_notif);
613 410
614/* 411/* Show the MII monitor interval. */
615 * Show and set the MII monitor interval. There are two tricky bits
616 * here. First, if MII monitoring is activated, then we must disable
617 * ARP monitoring. Second, if the timer isn't running, we must
618 * start it.
619 */
620static ssize_t bonding_show_miimon(struct device *d, 412static ssize_t bonding_show_miimon(struct device *d,
621 struct device_attribute *attr, 413 struct device_attribute *attr,
622 char *buf) 414 char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
625 417
626 return sprintf(buf, "%d\n", bond->params.miimon); 418 return sprintf(buf, "%d\n", bond->params.miimon);
627} 419}
628
629static ssize_t bonding_store_miimon(struct device *d,
630 struct device_attribute *attr,
631 const char *buf, size_t count)
632{
633 struct bonding *bond = to_bond(d);
634 int ret;
635
636 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
637 if (!ret)
638 ret = count;
639
640 return ret;
641}
642static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, 420static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
643 bonding_show_miimon, bonding_store_miimon); 421 bonding_show_miimon, bonding_sysfs_store_option);
644 422
645/* 423/* Show the primary slave. */
646 * Show and set the primary slave. The store function is much
647 * simpler than bonding_store_slaves function because it only needs to
648 * handle one interface name.
649 * The bond must be a mode that supports a primary for this be
650 * set.
651 */
652static ssize_t bonding_show_primary(struct device *d, 424static ssize_t bonding_show_primary(struct device *d,
653 struct device_attribute *attr, 425 struct device_attribute *attr,
654 char *buf) 426 char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
661 433
662 return count; 434 return count;
663} 435}
664
665static ssize_t bonding_store_primary(struct device *d,
666 struct device_attribute *attr,
667 const char *buf, size_t count)
668{
669 struct bonding *bond = to_bond(d);
670 int ret;
671
672 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
673 if (!ret)
674 ret = count;
675
676 return ret;
677}
678static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, 436static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
679 bonding_show_primary, bonding_store_primary); 437 bonding_show_primary, bonding_sysfs_store_option);
680 438
681/* 439/* Show the primary_reselect flag. */
682 * Show and set the primary_reselect flag.
683 */
684static ssize_t bonding_show_primary_reselect(struct device *d, 440static ssize_t bonding_show_primary_reselect(struct device *d,
685 struct device_attribute *attr, 441 struct device_attribute *attr,
686 char *buf) 442 char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
694 return sprintf(buf, "%s %d\n", 450 return sprintf(buf, "%s %d\n",
695 val->string, bond->params.primary_reselect); 451 val->string, bond->params.primary_reselect);
696} 452}
697
698static ssize_t bonding_store_primary_reselect(struct device *d,
699 struct device_attribute *attr,
700 const char *buf, size_t count)
701{
702 struct bonding *bond = to_bond(d);
703 int ret;
704
705 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
706 (char *)buf);
707 if (!ret)
708 ret = count;
709
710 return ret;
711}
712static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR, 453static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
713 bonding_show_primary_reselect, 454 bonding_show_primary_reselect, bonding_sysfs_store_option);
714 bonding_store_primary_reselect);
715 455
716/* 456/* Show the use_carrier flag. */
717 * Show and set the use_carrier flag.
718 */
719static ssize_t bonding_show_carrier(struct device *d, 457static ssize_t bonding_show_carrier(struct device *d,
720 struct device_attribute *attr, 458 struct device_attribute *attr,
721 char *buf) 459 char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
724 462
725 return sprintf(buf, "%d\n", bond->params.use_carrier); 463 return sprintf(buf, "%d\n", bond->params.use_carrier);
726} 464}
727
728static ssize_t bonding_store_carrier(struct device *d,
729 struct device_attribute *attr,
730 const char *buf, size_t count)
731{
732 struct bonding *bond = to_bond(d);
733 int ret;
734
735 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
736 if (!ret)
737 ret = count;
738
739 return ret;
740}
741static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, 465static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
742 bonding_show_carrier, bonding_store_carrier); 466 bonding_show_carrier, bonding_sysfs_store_option);
743 467
744 468
745/* 469/* Show currently active_slave. */
746 * Show and set currently active_slave.
747 */
748static ssize_t bonding_show_active_slave(struct device *d, 470static ssize_t bonding_show_active_slave(struct device *d,
749 struct device_attribute *attr, 471 struct device_attribute *attr,
750 char *buf) 472 char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
761 483
762 return count; 484 return count;
763} 485}
764
765static ssize_t bonding_store_active_slave(struct device *d,
766 struct device_attribute *attr,
767 const char *buf, size_t count)
768{
769 struct bonding *bond = to_bond(d);
770 int ret;
771
772 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
773 if (!ret)
774 ret = count;
775
776 return ret;
777}
778static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, 486static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
779 bonding_show_active_slave, bonding_store_active_slave); 487 bonding_show_active_slave, bonding_sysfs_store_option);
780
781 488
782/* 489/* Show link status of the bond interface. */
783 * Show link status of the bond interface.
784 */
785static ssize_t bonding_show_mii_status(struct device *d, 490static ssize_t bonding_show_mii_status(struct device *d,
786 struct device_attribute *attr, 491 struct device_attribute *attr,
787 char *buf) 492 char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
792} 497}
793static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); 498static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
794 499
795/* 500/* Show current 802.3ad aggregator ID. */
796 * Show current 802.3ad aggregator ID.
797 */
798static ssize_t bonding_show_ad_aggregator(struct device *d, 501static ssize_t bonding_show_ad_aggregator(struct device *d,
799 struct device_attribute *attr, 502 struct device_attribute *attr,
800 char *buf) 503 char *buf)
@@ -802,7 +505,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
802 int count = 0; 505 int count = 0;
803 struct bonding *bond = to_bond(d); 506 struct bonding *bond = to_bond(d);
804 507
805 if (bond->params.mode == BOND_MODE_8023AD) { 508 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
806 struct ad_info ad_info; 509 struct ad_info ad_info;
807 count = sprintf(buf, "%d\n", 510 count = sprintf(buf, "%d\n",
808 bond_3ad_get_active_agg_info(bond, &ad_info) 511 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
814static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL); 517static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
815 518
816 519
817/* 520/* Show number of active 802.3ad ports. */
818 * Show number of active 802.3ad ports.
819 */
820static ssize_t bonding_show_ad_num_ports(struct device *d, 521static ssize_t bonding_show_ad_num_ports(struct device *d,
821 struct device_attribute *attr, 522 struct device_attribute *attr,
822 char *buf) 523 char *buf)
@@ -824,7 +525,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
824 int count = 0; 525 int count = 0;
825 struct bonding *bond = to_bond(d); 526 struct bonding *bond = to_bond(d);
826 527
827 if (bond->params.mode == BOND_MODE_8023AD) { 528 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
828 struct ad_info ad_info; 529 struct ad_info ad_info;
829 count = sprintf(buf, "%d\n", 530 count = sprintf(buf, "%d\n",
830 bond_3ad_get_active_agg_info(bond, &ad_info) 531 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
836static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL); 537static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
837 538
838 539
839/* 540/* Show current 802.3ad actor key. */
840 * Show current 802.3ad actor key.
841 */
842static ssize_t bonding_show_ad_actor_key(struct device *d, 541static ssize_t bonding_show_ad_actor_key(struct device *d,
843 struct device_attribute *attr, 542 struct device_attribute *attr,
844 char *buf) 543 char *buf)
@@ -846,7 +545,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
846 int count = 0; 545 int count = 0;
847 struct bonding *bond = to_bond(d); 546 struct bonding *bond = to_bond(d);
848 547
849 if (bond->params.mode == BOND_MODE_8023AD) { 548 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
850 struct ad_info ad_info; 549 struct ad_info ad_info;
851 count = sprintf(buf, "%d\n", 550 count = sprintf(buf, "%d\n",
852 bond_3ad_get_active_agg_info(bond, &ad_info) 551 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
858static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL); 557static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
859 558
860 559
861/* 560/* Show current 802.3ad partner key. */
862 * Show current 802.3ad partner key.
863 */
864static ssize_t bonding_show_ad_partner_key(struct device *d, 561static ssize_t bonding_show_ad_partner_key(struct device *d,
865 struct device_attribute *attr, 562 struct device_attribute *attr,
866 char *buf) 563 char *buf)
@@ -868,7 +565,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
868 int count = 0; 565 int count = 0;
869 struct bonding *bond = to_bond(d); 566 struct bonding *bond = to_bond(d);
870 567
871 if (bond->params.mode == BOND_MODE_8023AD) { 568 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
872 struct ad_info ad_info; 569 struct ad_info ad_info;
873 count = sprintf(buf, "%d\n", 570 count = sprintf(buf, "%d\n",
874 bond_3ad_get_active_agg_info(bond, &ad_info) 571 bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
880static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL); 577static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
881 578
882 579
883/* 580/* Show current 802.3ad partner mac. */
884 * Show current 802.3ad partner mac.
885 */
886static ssize_t bonding_show_ad_partner_mac(struct device *d, 581static ssize_t bonding_show_ad_partner_mac(struct device *d,
887 struct device_attribute *attr, 582 struct device_attribute *attr,
888 char *buf) 583 char *buf)
@@ -890,7 +585,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
890 int count = 0; 585 int count = 0;
891 struct bonding *bond = to_bond(d); 586 struct bonding *bond = to_bond(d);
892 587
893 if (bond->params.mode == BOND_MODE_8023AD) { 588 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
894 struct ad_info ad_info; 589 struct ad_info ad_info;
895 if (!bond_3ad_get_active_agg_info(bond, &ad_info)) 590 if (!bond_3ad_get_active_agg_info(bond, &ad_info))
896 count = sprintf(buf, "%pM\n", ad_info.partner_system); 591 count = sprintf(buf, "%pM\n", ad_info.partner_system);
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
900} 595}
901static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); 596static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
902 597
903/* 598/* Show the queue_ids of the slaves in the current bond. */
904 * Show the queue_ids of the slaves in the current bond.
905 */
906static ssize_t bonding_show_queue_id(struct device *d, 599static ssize_t bonding_show_queue_id(struct device *d,
907 struct device_attribute *attr, 600 struct device_attribute *attr,
908 char *buf) 601 char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
933 626
934 return res; 627 return res;
935} 628}
936
937/*
938 * Set the queue_ids of the slaves in the current bond. The bond
939 * interface must be enslaved for this to work.
940 */
941static ssize_t bonding_store_queue_id(struct device *d,
942 struct device_attribute *attr,
943 const char *buffer, size_t count)
944{
945 struct bonding *bond = to_bond(d);
946 int ret;
947
948 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
949 if (!ret)
950 ret = count;
951
952 return ret;
953}
954static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id, 629static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
955 bonding_store_queue_id); 630 bonding_sysfs_store_option);
956 631
957 632
958/* 633/* Show the all_slaves_active flag. */
959 * Show and set the all_slaves_active flag.
960 */
961static ssize_t bonding_show_slaves_active(struct device *d, 634static ssize_t bonding_show_slaves_active(struct device *d,
962 struct device_attribute *attr, 635 struct device_attribute *attr,
963 char *buf) 636 char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
966 639
967 return sprintf(buf, "%d\n", bond->params.all_slaves_active); 640 return sprintf(buf, "%d\n", bond->params.all_slaves_active);
968} 641}
969
970static ssize_t bonding_store_slaves_active(struct device *d,
971 struct device_attribute *attr,
972 const char *buf, size_t count)
973{
974 struct bonding *bond = to_bond(d);
975 int ret;
976
977 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
978 (char *)buf);
979 if (!ret)
980 ret = count;
981
982 return ret;
983}
984static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 642static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
985 bonding_show_slaves_active, bonding_store_slaves_active); 643 bonding_show_slaves_active, bonding_sysfs_store_option);
986 644
987/* 645/* Show the number of IGMP membership reports to send on link failure */
988 * Show and set the number of IGMP membership reports to send on link failure
989 */
990static ssize_t bonding_show_resend_igmp(struct device *d, 646static ssize_t bonding_show_resend_igmp(struct device *d,
991 struct device_attribute *attr, 647 struct device_attribute *attr,
992 char *buf) 648 char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
995 651
996 return sprintf(buf, "%d\n", bond->params.resend_igmp); 652 return sprintf(buf, "%d\n", bond->params.resend_igmp);
997} 653}
998
999static ssize_t bonding_store_resend_igmp(struct device *d,
1000 struct device_attribute *attr,
1001 const char *buf, size_t count)
1002{
1003 struct bonding *bond = to_bond(d);
1004 int ret;
1005
1006 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
1007 if (!ret)
1008 ret = count;
1009
1010 return ret;
1011}
1012
1013static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR, 654static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
1014 bonding_show_resend_igmp, bonding_store_resend_igmp); 655 bonding_show_resend_igmp, bonding_sysfs_store_option);
1015 656
1016 657
1017static ssize_t bonding_show_lp_interval(struct device *d, 658static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
1019 char *buf) 660 char *buf)
1020{ 661{
1021 struct bonding *bond = to_bond(d); 662 struct bonding *bond = to_bond(d);
663
1022 return sprintf(buf, "%d\n", bond->params.lp_interval); 664 return sprintf(buf, "%d\n", bond->params.lp_interval);
1023} 665}
666static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
667 bonding_show_lp_interval, bonding_sysfs_store_option);
1024 668
1025static ssize_t bonding_store_lp_interval(struct device *d, 669static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
1026 struct device_attribute *attr, 670 struct device_attribute *attr,
1027 const char *buf, size_t count) 671 char *buf)
1028{ 672{
1029 struct bonding *bond = to_bond(d); 673 struct bonding *bond = to_bond(d);
1030 int ret; 674 return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
1031
1032 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
1033 if (!ret)
1034 ret = count;
1035
1036 return ret;
1037} 675}
1038 676static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
1039static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR, 677 bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
1040 bonding_show_lp_interval, bonding_store_lp_interval);
1041 678
1042static ssize_t bonding_show_packets_per_slave(struct device *d, 679static ssize_t bonding_show_packets_per_slave(struct device *d,
1043 struct device_attribute *attr, 680 struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
1045{ 682{
1046 struct bonding *bond = to_bond(d); 683 struct bonding *bond = to_bond(d);
1047 unsigned int packets_per_slave = bond->params.packets_per_slave; 684 unsigned int packets_per_slave = bond->params.packets_per_slave;
1048 return sprintf(buf, "%u\n", packets_per_slave);
1049}
1050
1051static ssize_t bonding_store_packets_per_slave(struct device *d,
1052 struct device_attribute *attr,
1053 const char *buf, size_t count)
1054{
1055 struct bonding *bond = to_bond(d);
1056 int ret;
1057 685
1058 ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE, 686 return sprintf(buf, "%u\n", packets_per_slave);
1059 (char *)buf);
1060 if (!ret)
1061 ret = count;
1062
1063 return ret;
1064} 687}
1065
1066static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR, 688static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
1067 bonding_show_packets_per_slave, 689 bonding_show_packets_per_slave, bonding_sysfs_store_option);
1068 bonding_store_packets_per_slave);
1069 690
1070static struct attribute *per_bond_attrs[] = { 691static struct attribute *per_bond_attrs[] = {
1071 &dev_attr_slaves.attr, 692 &dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
1099 &dev_attr_min_links.attr, 720 &dev_attr_min_links.attr,
1100 &dev_attr_lp_interval.attr, 721 &dev_attr_lp_interval.attr,
1101 &dev_attr_packets_per_slave.attr, 722 &dev_attr_packets_per_slave.attr,
723 &dev_attr_tlb_dynamic_lb.attr,
1102 NULL, 724 NULL,
1103}; 725};
1104 726
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
1107 .attrs = per_bond_attrs, 729 .attrs = per_bond_attrs,
1108}; 730};
1109 731
1110/* 732/* Initialize sysfs. This sets up the bonding_masters file in
1111 * Initialize sysfs. This sets up the bonding_masters file in
1112 * /sys/class/net. 733 * /sys/class/net.
1113 */ 734 */
1114int bond_create_sysfs(struct bond_net *bn) 735int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
1120 741
1121 ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters, 742 ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
1122 bn->net); 743 bn->net);
1123 /* 744 /* Permit multiple loads of the module by ignoring failures to
1124 * Permit multiple loads of the module by ignoring failures to
1125 * create the bonding_masters sysfs file. Bonding devices 745 * create the bonding_masters sysfs file. Bonding devices
1126 * created by second or subsequent loads of the module will 746 * created by second or subsequent loads of the module will
1127 * not be listed in, or controllable by, bonding_masters, but 747 * not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
1144 764
1145} 765}
1146 766
1147/* 767/* Remove /sys/class/net/bonding_masters. */
1148 * Remove /sys/class/net/bonding_masters.
1149 */
1150void bond_destroy_sysfs(struct bond_net *bn) 768void bond_destroy_sysfs(struct bond_net *bn)
1151{ 769{
1152 netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net); 770 netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
1153} 771}
1154 772
1155/* 773/* Initialize sysfs for each bond. This sets up and registers
1156 * Initialize sysfs for each bond. This sets up and registers
1157 * the 'bonding' directory for each individual bond under /sys/class/net. 774 * the 'bonding' directory for each individual bond under /sys/class/net.
1158 */ 775 */
1159void bond_prepare_sysfs_group(struct bonding *bond) 776void bond_prepare_sysfs_group(struct bonding *bond)
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2e4eec5450c8..198677f58ce0 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -69,8 +69,8 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
69{ 69{
70 const struct aggregator *agg; 70 const struct aggregator *agg;
71 71
72 if (slave->bond->params.mode == BOND_MODE_8023AD) { 72 if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
73 agg = SLAVE_AD_INFO(slave).port.aggregator; 73 agg = SLAVE_AD_INFO(slave)->port.aggregator;
74 if (agg) 74 if (agg)
75 return sprintf(buf, "%d\n", 75 return sprintf(buf, "%d\n",
76 agg->aggregator_identifier); 76 agg->aggregator_identifier);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 00bea320e3b5..0b4d9cde0b05 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -41,42 +41,6 @@
41 41
42#define BOND_DEFAULT_MIIMON 100 42#define BOND_DEFAULT_MIIMON 100
43 43
44#define IS_UP(dev) \
45 ((((dev)->flags & IFF_UP) == IFF_UP) && \
46 netif_running(dev) && \
47 netif_carrier_ok(dev))
48
49/*
50 * Checks whether slave is ready for transmit.
51 */
52#define SLAVE_IS_OK(slave) \
53 (((slave)->dev->flags & IFF_UP) && \
54 netif_running((slave)->dev) && \
55 ((slave)->link == BOND_LINK_UP) && \
56 bond_is_active_slave(slave))
57
58
59#define USES_PRIMARY(mode) \
60 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
61 ((mode) == BOND_MODE_TLB) || \
62 ((mode) == BOND_MODE_ALB))
63
64#define BOND_NO_USES_ARP(mode) \
65 (((mode) == BOND_MODE_8023AD) || \
66 ((mode) == BOND_MODE_TLB) || \
67 ((mode) == BOND_MODE_ALB))
68
69#define TX_QUEUE_OVERRIDE(mode) \
70 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
71 ((mode) == BOND_MODE_ROUNDROBIN))
72
73#define BOND_MODE_IS_LB(mode) \
74 (((mode) == BOND_MODE_TLB) || \
75 ((mode) == BOND_MODE_ALB))
76
77#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \
78 ((htonl(INADDR_BROADCAST) == a) || \
79 ipv4_is_zeronet(a))
80/* 44/*
81 * Less bad way to call ioctl from within the kernel; this needs to be 45 * Less bad way to call ioctl from within the kernel; this needs to be
82 * done some other way to get the call out of interrupt context. 46 * done some other way to get the call out of interrupt context.
@@ -90,6 +54,8 @@
90 set_fs(fs); \ 54 set_fs(fs); \
91 res; }) 55 res; })
92 56
57#define BOND_MODE(bond) ((bond)->params.mode)
58
93/* slave list primitives */ 59/* slave list primitives */
94#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower) 60#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
95 61
@@ -175,6 +141,7 @@ struct bond_params {
175 int resend_igmp; 141 int resend_igmp;
176 int lp_interval; 142 int lp_interval;
177 int packets_per_slave; 143 int packets_per_slave;
144 int tlb_dynamic_lb;
178 struct reciprocal_value reciprocal_packets_per_slave; 145 struct reciprocal_value reciprocal_packets_per_slave;
179}; 146};
180 147
@@ -183,8 +150,6 @@ struct bond_parm_tbl {
183 int mode; 150 int mode;
184}; 151};
185 152
186#define BOND_MAX_MODENAME_LEN 20
187
188struct slave { 153struct slave {
189 struct net_device *dev; /* first - useful for panic debug */ 154 struct net_device *dev; /* first - useful for panic debug */
190 struct bonding *bond; /* our master */ 155 struct bonding *bond; /* our master */
@@ -205,7 +170,7 @@ struct slave {
205 u32 speed; 170 u32 speed;
206 u16 queue_id; 171 u16 queue_id;
207 u8 perm_hwaddr[ETH_ALEN]; 172 u8 perm_hwaddr[ETH_ALEN];
208 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 173 struct ad_slave_info *ad_info;
209 struct tlb_slave_info tlb_info; 174 struct tlb_slave_info tlb_info;
210#ifdef CONFIG_NET_POLL_CONTROLLER 175#ifdef CONFIG_NET_POLL_CONTROLLER
211 struct netpoll *np; 176 struct netpoll *np;
@@ -285,14 +250,41 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
285 250
286static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 251static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
287{ 252{
288 if (!slave || !slave->bond)
289 return NULL;
290 return slave->bond; 253 return slave->bond;
291} 254}
292 255
256static inline bool bond_should_override_tx_queue(struct bonding *bond)
257{
258 return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
259 BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
260}
261
293static inline bool bond_is_lb(const struct bonding *bond) 262static inline bool bond_is_lb(const struct bonding *bond)
294{ 263{
295 return BOND_MODE_IS_LB(bond->params.mode); 264 return BOND_MODE(bond) == BOND_MODE_TLB ||
265 BOND_MODE(bond) == BOND_MODE_ALB;
266}
267
268static inline bool bond_mode_uses_arp(int mode)
269{
270 return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
271 mode != BOND_MODE_ALB;
272}
273
274static inline bool bond_mode_uses_primary(int mode)
275{
276 return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
277 mode == BOND_MODE_ALB;
278}
279
280static inline bool bond_uses_primary(struct bonding *bond)
281{
282 return bond_mode_uses_primary(BOND_MODE(bond));
283}
284
285static inline bool bond_slave_is_up(struct slave *slave)
286{
287 return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
296} 288}
297 289
298static inline void bond_set_active_slave(struct slave *slave) 290static inline void bond_set_active_slave(struct slave *slave)
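
This hunk trades the removed USES_PRIMARY / BOND_NO_USES_ARP / BOND_MODE_IS_LB macros for typed inline helpers built on BOND_MODE(). A hedged usage sketch ('bond' is a hypothetical struct bonding *; the printouts are illustrative only):

        if (bond_uses_primary(bond))            /* active-backup, TLB or ALB */
                pr_info("%s: mode %d honours a primary slave\n",
                        bond->dev->name, BOND_MODE(bond));
        if (!bond_mode_uses_arp(BOND_MODE(bond)))
                pr_info("%s: ARP monitoring unavailable in this mode\n",
                        bond->dev->name);

Note the inversion: the old BOND_NO_USES_ARP(mode) is now expressed positively as !bond_mode_uses_arp(mode).
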
@@ -365,6 +357,12 @@ static inline bool bond_is_active_slave(struct slave *slave)
365 return !bond_slave_state(slave); 357 return !bond_slave_state(slave);
366} 358}
367 359
360static inline bool bond_slave_can_tx(struct slave *slave)
361{
362 return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
363 bond_is_active_slave(slave);
364}
365
368#define BOND_PRI_RESELECT_ALWAYS 0 366#define BOND_PRI_RESELECT_ALWAYS 0
369#define BOND_PRI_RESELECT_BETTER 1 367#define BOND_PRI_RESELECT_BETTER 1
370#define BOND_PRI_RESELECT_FAILURE 2 368#define BOND_PRI_RESELECT_FAILURE 2
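
bond_slave_can_tx() is the inline successor of the removed SLAVE_IS_OK() macro: the device must be running with carrier, the bonding link state must be BOND_LINK_UP, and the slave must be active. A hedged sketch of a transmit-path guard (assumes rcu_read_lock() is held; variables are hypothetical):

        struct list_head *iter;
        struct slave *slave;

        bond_for_each_slave_rcu(bond, slave, iter) {
                if (!bond_slave_can_tx(slave))
                        continue;       /* down, no carrier, or backup */
                bond_dev_queue_xmit(bond, skb, slave->dev);
                break;
        }
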
@@ -396,12 +394,16 @@ static inline int slave_do_arp_validate(struct bonding *bond,
396 return bond->params.arp_validate & (1 << bond_slave_state(slave)); 394 return bond->params.arp_validate & (1 << bond_slave_state(slave));
397} 395}
398 396
399static inline int slave_do_arp_validate_only(struct bonding *bond, 397static inline int slave_do_arp_validate_only(struct bonding *bond)
400 struct slave *slave)
401{ 398{
402 return bond->params.arp_validate & BOND_ARP_FILTER; 399 return bond->params.arp_validate & BOND_ARP_FILTER;
403} 400}
404 401
402static inline int bond_is_ip_target_ok(__be32 addr)
403{
404 return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
405}
406
405/* Get the oldest arp which we've received on this slave for bond's 407/* Get the oldest arp which we've received on this slave for bond's
406 * arp_targets. 408 * arp_targets.
407 */ 409 */
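
bond_is_ip_target_ok() replaces the removed IS_IP_TARGET_UNUSABLE_ADDRESS() macro with inverted logic: an arp_ip_target is acceptable unless it is the limited broadcast or a zeronet address. Worked examples (addresses illustrative):

        bond_is_ip_target_ok(cpu_to_be32(0xc0a80001)); /* 192.168.0.1     -> ok      */
        bond_is_ip_target_ok(cpu_to_be32(0xffffffff)); /* 255.255.255.255 -> lbcast  */
        bond_is_ip_target_ok(cpu_to_be32(0x00000001)); /* 0.0.0.1         -> zeronet */
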
@@ -479,16 +481,14 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
479 return addr; 481 return addr;
480} 482}
481 483
482static inline bool slave_can_tx(struct slave *slave) 484struct bond_net {
483{ 485 struct net *net; /* Associated network namespace */
484 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP && 486 struct list_head dev_list;
485 bond_is_active_slave(slave)) 487#ifdef CONFIG_PROC_FS
486 return true; 488 struct proc_dir_entry *proc_dir;
487 else 489#endif
488 return false; 490 struct class_attribute class_attr_bonding_masters;
489} 491};
490
491struct bond_net;
492 492
493int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); 493int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
494void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 494void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -500,7 +500,7 @@ int bond_sysfs_slave_add(struct slave *slave);
500void bond_sysfs_slave_del(struct slave *slave); 500void bond_sysfs_slave_del(struct slave *slave);
501int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 501int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
502int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); 502int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
503int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count); 503u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
504void bond_select_active_slave(struct bonding *bond); 504void bond_select_active_slave(struct bonding *bond);
505void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 505void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
506void bond_create_debugfs(void); 506void bond_create_debugfs(void);
@@ -516,15 +516,9 @@ void bond_netlink_fini(void);
516struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond); 516struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
517struct net_device *bond_option_active_slave_get(struct bonding *bond); 517struct net_device *bond_option_active_slave_get(struct bonding *bond);
518const char *bond_slave_link_status(s8 link); 518const char *bond_slave_link_status(s8 link);
519 519bool bond_verify_device_path(struct net_device *start_dev,
520struct bond_net { 520 struct net_device *end_dev,
521 struct net * net; /* Associated network namespace */ 521 struct bond_vlan_tag *tags);
522 struct list_head dev_list;
523#ifdef CONFIG_PROC_FS
524 struct proc_dir_entry * proc_dir;
525#endif
526 struct class_attribute class_attr_bonding_masters;
527};
528 522
529#ifdef CONFIG_PROC_FS 523#ifdef CONFIG_PROC_FS
530void bond_create_proc_entry(struct bonding *bond); 524void bond_create_proc_entry(struct bonding *bond);
@@ -576,6 +570,27 @@ static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
576 return NULL; 570 return NULL;
577} 571}
578 572
573/* Caller must hold rcu_read_lock() for read */
574static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
575{
576 struct list_head *iter;
577 struct slave *tmp;
578 struct netdev_hw_addr *ha;
579
580 bond_for_each_slave_rcu(bond, tmp, iter)
581 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
582 return true;
583
584 if (netdev_uc_empty(bond->dev))
585 return false;
586
587 netdev_for_each_uc_addr(ha, bond->dev)
588 if (ether_addr_equal_64bits(mac, ha->addr))
589 return true;
590
591 return false;
592}
593
579/* Check if the ip is present in arp ip list, or first free slot if ip == 0 594/* Check if the ip is present in arp ip list, or first free slot if ip == 0
580 * Returns -1 if not found, index if found 595 * Returns -1 if not found, index if found
581 */ 596 */
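
bond_slave_has_mac_rx() answers whether a destination MAC belongs to the bond: it scans every slave's current address and then, if any are configured, the bond's unicast address list. A hedged sketch of a receive-path caller (hypothetical; the helper requires rcu_read_lock(), per its comment, and deliver_locally() is a made-up consumer):

        bool ours;

        rcu_read_lock();
        ours = bond_slave_has_mac_rx(bond, eth_hdr(skb)->h_dest);
        rcu_read_unlock();
        if (ours)
                deliver_locally(skb);   /* hypothetical consumer */
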
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9e7d95dae2c7..41688229c570 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
65 65
66config CAN_AT91 66config CAN_AT91
67 tristate "Atmel AT91 onchip CAN controller" 67 tristate "Atmel AT91 onchip CAN controller"
68 depends on ARM 68 depends on ARCH_AT91 || COMPILE_TEST
69 ---help--- 69 ---help---
70 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 70 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
71 and AT91SAM9X5 processors. 71 and AT91SAM9X5 processors.
@@ -77,12 +77,6 @@ config CAN_TI_HECC
77 Driver for TI HECC (High End CAN Controller) module found on many 77 Driver for TI HECC (High End CAN Controller) module found on many
78 TI devices. The device specifications are available from www.ti.com 78 TI devices. The device specifications are available from www.ti.com
79 79
80config CAN_MCP251X
81 tristate "Microchip MCP251x SPI CAN controllers"
82 depends on SPI && HAS_DMA
83 ---help---
84 Driver for the Microchip MCP251x SPI CAN controllers.
85
86config CAN_BFIN 80config CAN_BFIN
87 depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x 81 depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
88 tristate "Analog Devices Blackfin on-chip CAN" 82 tristate "Analog Devices Blackfin on-chip CAN"
@@ -110,7 +104,7 @@ config CAN_FLEXCAN
110 104
111config PCH_CAN 105config PCH_CAN
112 tristate "Intel EG20T PCH CAN controller" 106 tristate "Intel EG20T PCH CAN controller"
113 depends on PCI 107 depends on PCI && (X86_32 || COMPILE_TEST)
114 ---help--- 108 ---help---
115 This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which 109 This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
116 is an IOH for x86 embedded processor (Intel Atom E6xx series). 110 is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -125,6 +119,24 @@ config CAN_GRCAN
125 endian syntheses of the cores would need some modifications on 119 endian syntheses of the cores would need some modifications on
126 the hardware level to work. 120 the hardware level to work.
127 121
122config CAN_RCAR
123 tristate "Renesas R-Car CAN controller"
124 depends on ARM
125 ---help---
 126	  Say Y here if you want to use the CAN controller found on Renesas R-Car
127 SoCs.
128
129 To compile this driver as a module, choose M here: the module will
130 be called rcar_can.
131
132config CAN_XILINXCAN
133 tristate "Xilinx CAN"
134 depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
135 depends on COMMON_CLK && HAS_IOMEM
136 ---help---
137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and
138 Zynq CANPS IP.
139
128source "drivers/net/can/mscan/Kconfig" 140source "drivers/net/can/mscan/Kconfig"
129 141
130source "drivers/net/can/sja1000/Kconfig" 142source "drivers/net/can/sja1000/Kconfig"
@@ -133,6 +145,8 @@ source "drivers/net/can/c_can/Kconfig"
133 145
134source "drivers/net/can/cc770/Kconfig" 146source "drivers/net/can/cc770/Kconfig"
135 147
148source "drivers/net/can/spi/Kconfig"
149
136source "drivers/net/can/usb/Kconfig" 150source "drivers/net/can/usb/Kconfig"
137 151
138source "drivers/net/can/softing/Kconfig" 152source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c7440392adbb..1697f22353a9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
10 10
11can-dev-$(CONFIG_CAN_LEDS) += led.o 11can-dev-$(CONFIG_CAN_LEDS) += led.o
12 12
13obj-y += spi/
13obj-y += usb/ 14obj-y += usb/
14obj-y += softing/ 15obj-y += softing/
15 16
@@ -19,11 +20,12 @@ obj-$(CONFIG_CAN_C_CAN) += c_can/
19obj-$(CONFIG_CAN_CC770) += cc770/ 20obj-$(CONFIG_CAN_CC770) += cc770/
20obj-$(CONFIG_CAN_AT91) += at91_can.o 21obj-$(CONFIG_CAN_AT91) += at91_can.o
21obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 22obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
22obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
23obj-$(CONFIG_CAN_BFIN) += bfin_can.o 23obj-$(CONFIG_CAN_BFIN) += bfin_can.o
24obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o 24obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
25obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o 25obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
26obj-$(CONFIG_PCH_CAN) += pch_can.o 26obj-$(CONFIG_PCH_CAN) += pch_can.o
27obj-$(CONFIG_CAN_GRCAN) += grcan.o 27obj-$(CONFIG_CAN_GRCAN) += grcan.o
28obj-$(CONFIG_CAN_RCAR) += rcar_can.o
29obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
28 30
29ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 31ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 95e04e2002da..8e78bb48f5a4 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -252,8 +252,7 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj
252 struct c_can_priv *priv = netdev_priv(dev); 252 struct c_can_priv *priv = netdev_priv(dev);
253 int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface); 253 int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
254 254
255 priv->write_reg(priv, reg + 1, cmd); 255 priv->write_reg32(priv, reg, (cmd << 16) | obj);
256 priv->write_reg(priv, reg, obj);
257 256
258 for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) { 257 for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
259 if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY)) 258 if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
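
The two sequential 16-bit writes to the command request pair are folded into one 32-bit write, with the command in the upper half-word and the object number in the lower. A worked example of the packing (values illustrative):

        u32 cmd = 0x87, obj = 17;
        u32 word = (cmd << 16) | obj;   /* 0x00870011: command high, object low */

On hardware with only 16-bit access, the write_reg32 hook splits the word back up, as the PCI and platform glue below shows.
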
@@ -328,8 +327,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
328 change_bit(idx, &priv->tx_dir); 327 change_bit(idx, &priv->tx_dir);
329 } 328 }
330 329
331 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb); 330 priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
332 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
333 331
334 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); 332 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
335 333
@@ -391,8 +389,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
391 389
392 frame->can_dlc = get_can_dlc(ctrl & 0x0F); 390 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
393 391
394 arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)); 392 arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
395 arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
396 393
397 if (arb & IF_ARB_MSGXTD) 394 if (arb & IF_ARB_MSGXTD)
398 frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG; 395 frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -424,12 +421,10 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
424 struct c_can_priv *priv = netdev_priv(dev); 421 struct c_can_priv *priv = netdev_priv(dev);
425 422
426 mask |= BIT(29); 423 mask |= BIT(29);
427 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask); 424 priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
428 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
429 425
430 id |= IF_ARB_MSGVAL; 426 id |= IF_ARB_MSGVAL;
431 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id); 427 priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
432 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
433 428
434 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont); 429 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
435 c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP); 430 c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index c56f1b1c11ca..99ad1aa576b0 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -78,6 +78,7 @@ enum reg {
78 C_CAN_INTPND2_REG, 78 C_CAN_INTPND2_REG,
79 C_CAN_MSGVAL1_REG, 79 C_CAN_MSGVAL1_REG,
80 C_CAN_MSGVAL2_REG, 80 C_CAN_MSGVAL2_REG,
81 C_CAN_FUNCTION_REG,
81}; 82};
82 83
83static const u16 reg_map_c_can[] = { 84static const u16 reg_map_c_can[] = {
@@ -129,6 +130,7 @@ static const u16 reg_map_d_can[] = {
129 [C_CAN_BRPEXT_REG] = 0x0E, 130 [C_CAN_BRPEXT_REG] = 0x0E,
130 [C_CAN_INT_REG] = 0x10, 131 [C_CAN_INT_REG] = 0x10,
131 [C_CAN_TEST_REG] = 0x14, 132 [C_CAN_TEST_REG] = 0x14,
133 [C_CAN_FUNCTION_REG] = 0x18,
132 [C_CAN_TXRQST1_REG] = 0x88, 134 [C_CAN_TXRQST1_REG] = 0x88,
133 [C_CAN_TXRQST2_REG] = 0x8A, 135 [C_CAN_TXRQST2_REG] = 0x8A,
134 [C_CAN_NEWDAT1_REG] = 0x9C, 136 [C_CAN_NEWDAT1_REG] = 0x9C,
@@ -176,8 +178,10 @@ struct c_can_priv {
176 atomic_t tx_active; 178 atomic_t tx_active;
177 unsigned long tx_dir; 179 unsigned long tx_dir;
178 int last_status; 180 int last_status;
179 u16 (*read_reg) (struct c_can_priv *priv, enum reg index); 181 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
180 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val); 182 void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
183 u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
184 void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
181 void __iomem *base; 185 void __iomem *base;
182 const u16 *regs; 186 const u16 *regs;
183 void *priv; /* for board-specific data */ 187 void *priv; /* for board-specific data */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index fe5f6303b584..5d11e0e4225b 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -19,9 +19,13 @@
19 19
20#include "c_can.h" 20#include "c_can.h"
21 21
22#define PCI_DEVICE_ID_PCH_CAN 0x8818
23#define PCH_PCI_SOFT_RESET 0x01fc
24
22enum c_can_pci_reg_align { 25enum c_can_pci_reg_align {
23 C_CAN_REG_ALIGN_16, 26 C_CAN_REG_ALIGN_16,
24 C_CAN_REG_ALIGN_32, 27 C_CAN_REG_ALIGN_32,
28 C_CAN_REG_32,
25}; 29};
26 30
27struct c_can_pci_data { 31struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
31 enum c_can_pci_reg_align reg_align; 35 enum c_can_pci_reg_align reg_align;
32 /* Set the frequency */ 36 /* Set the frequency */
33 unsigned int freq; 37 unsigned int freq;
38 /* PCI bar number */
39 int bar;
40 /* Callback for reset */
41 void (*init)(const struct c_can_priv *priv, bool enable);
34}; 42};
35 43
36/* 44/*
@@ -39,30 +47,70 @@ struct c_can_pci_data {
39 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. 47 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
40 * Handle the same by providing a common read/write interface. 48 * Handle the same by providing a common read/write interface.
41 */ 49 */
42static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv, 50static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
43 enum reg index) 51 enum reg index)
44{ 52{
45 return readw(priv->base + priv->regs[index]); 53 return readw(priv->base + priv->regs[index]);
46} 54}
47 55
48static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv, 56static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
49 enum reg index, u16 val) 57 enum reg index, u16 val)
50{ 58{
51 writew(val, priv->base + priv->regs[index]); 59 writew(val, priv->base + priv->regs[index]);
52} 60}
53 61
54static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv, 62static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
55 enum reg index) 63 enum reg index)
56{ 64{
57 return readw(priv->base + 2 * priv->regs[index]); 65 return readw(priv->base + 2 * priv->regs[index]);
58} 66}
59 67
60static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv, 68static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
61 enum reg index, u16 val) 69 enum reg index, u16 val)
62{ 70{
63 writew(val, priv->base + 2 * priv->regs[index]); 71 writew(val, priv->base + 2 * priv->regs[index]);
64} 72}
65 73
74static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv,
75 enum reg index)
76{
77 return (u16)ioread32(priv->base + 2 * priv->regs[index]);
78}
79
80static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv,
81 enum reg index, u16 val)
82{
83 iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
84}
85
86static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
87{
88 u32 val;
89
90 val = priv->read_reg(priv, index);
91 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
92
93 return val;
94}
95
96static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
97 u32 val)
98{
99 priv->write_reg(priv, index + 1, val >> 16);
100 priv->write_reg(priv, index, val);
101}
102
103static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
104{
105 if (enable) {
106 u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
107
108 /* write to sw reset register */
109 iowrite32(1, addr);
110 iowrite32(0, addr);
111 }
112}
113
66static int c_can_pci_probe(struct pci_dev *pdev, 114static int c_can_pci_probe(struct pci_dev *pdev,
67 const struct pci_device_id *ent) 115 const struct pci_device_id *ent)
68{ 116{
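
For buses without a native 32-bit path, c_can_pci_read_reg32()/c_can_pci_write_reg32() compose the value from the 16-bit accessors at index and index + 1. The write stores the high word (index + 1) first so that the low word, which may trigger the operation, is written last. A worked round-trip (register values illustrative):

        u16 lo = 0x5678, hi = 0x1234;        /* two read_reg() results    */
        u32 val = lo | ((u32)hi << 16);      /* read_reg32 -> 0x12345678  */
        /* write_reg32 then stores val >> 16 first, val & 0xffff last */

The PCH reset hook simply pulses the soft-reset register on enable: a 1 followed by a 0 written to PCH_PCI_SOFT_RESET.
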
@@ -90,7 +138,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
90 pci_set_master(pdev); 138 pci_set_master(pdev);
91 } 139 }
92 140
93 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 141 addr = pci_iomap(pdev, c_can_pci_data->bar,
142 pci_resource_len(pdev, c_can_pci_data->bar));
94 if (!addr) { 143 if (!addr) {
95 dev_err(&pdev->dev, 144 dev_err(&pdev->dev,
96 "device has no PCI memory resources, " 145 "device has no PCI memory resources, "
@@ -147,10 +196,18 @@ static int c_can_pci_probe(struct pci_dev *pdev,
147 priv->read_reg = c_can_pci_read_reg_aligned_to_16bit; 196 priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
148 priv->write_reg = c_can_pci_write_reg_aligned_to_16bit; 197 priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
149 break; 198 break;
199 case C_CAN_REG_32:
200 priv->read_reg = c_can_pci_read_reg_32bit;
201 priv->write_reg = c_can_pci_write_reg_32bit;
202 break;
150 default: 203 default:
151 ret = -EINVAL; 204 ret = -EINVAL;
152 goto out_free_c_can; 205 goto out_free_c_can;
153 } 206 }
207 priv->read_reg32 = c_can_pci_read_reg32;
208 priv->write_reg32 = c_can_pci_write_reg32;
209
210 priv->raminit = c_can_pci_data->init;
154 211
155 ret = register_c_can_dev(dev); 212 ret = register_c_can_dev(dev);
156 if (ret) { 213 if (ret) {
@@ -198,6 +255,15 @@ static struct c_can_pci_data c_can_sta2x11= {
198 .type = BOSCH_C_CAN, 255 .type = BOSCH_C_CAN,
199 .reg_align = C_CAN_REG_ALIGN_32, 256 .reg_align = C_CAN_REG_ALIGN_32,
 200	.freq = 52000000, /* 52 MHz */ 257	.freq = 52000000, /* 52 MHz */
258 .bar = 0,
259};
260
261static struct c_can_pci_data c_can_pch = {
262 .type = BOSCH_C_CAN,
263 .reg_align = C_CAN_REG_32,
264 .freq = 50000000, /* 50 MHz */
265 .init = c_can_pci_reset_pch,
266 .bar = 1,
201}; 267};
202 268
203#define C_CAN_ID(_vend, _dev, _driverdata) { \ 269#define C_CAN_ID(_vend, _dev, _driverdata) { \
@@ -207,6 +273,8 @@ static struct c_can_pci_data c_can_sta2x11= {
207static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = { 273static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
208 C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN, 274 C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
209 c_can_sta2x11), 275 c_can_sta2x11),
276 C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
277 c_can_pch),
210 {}, 278 {},
211}; 279};
212static struct pci_driver c_can_pci_driver = { 280static struct pci_driver c_can_pci_driver = {
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 1df0b322d1e4..824108cd9fd5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -40,6 +40,7 @@
40#define CAN_RAMINIT_START_MASK(i) (0x001 << (i)) 40#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
41#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i)) 41#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
42#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i)) 42#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
43#define DCAN_RAM_INIT_BIT (1 << 3)
43static DEFINE_SPINLOCK(raminit_lock); 44static DEFINE_SPINLOCK(raminit_lock);
44/* 45/*
45 * 16-bit c_can registers can be arranged differently in the memory 46 * 16-bit c_can registers can be arranged differently in the memory
@@ -47,31 +48,31 @@ static DEFINE_SPINLOCK(raminit_lock);
47 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. 48 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
48 * Handle the same by providing a common read/write interface. 49 * Handle the same by providing a common read/write interface.
49 */ 50 */
50static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv, 51static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
51 enum reg index) 52 enum reg index)
52{ 53{
53 return readw(priv->base + priv->regs[index]); 54 return readw(priv->base + priv->regs[index]);
54} 55}
55 56
56static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv, 57static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
57 enum reg index, u16 val) 58 enum reg index, u16 val)
58{ 59{
59 writew(val, priv->base + priv->regs[index]); 60 writew(val, priv->base + priv->regs[index]);
60} 61}
61 62
62static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv, 63static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
63 enum reg index) 64 enum reg index)
64{ 65{
65 return readw(priv->base + 2 * priv->regs[index]); 66 return readw(priv->base + 2 * priv->regs[index]);
66} 67}
67 68
68static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv, 69static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
69 enum reg index, u16 val) 70 enum reg index, u16 val)
70{ 71{
71 writew(val, priv->base + 2 * priv->regs[index]); 72 writew(val, priv->base + 2 * priv->regs[index]);
72} 73}
73 74
74static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask, 75static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
75 u32 val) 76 u32 val)
76{ 77{
77 /* We look only at the bits of our instance. */ 78 /* We look only at the bits of our instance. */
@@ -80,7 +81,7 @@ static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
80 udelay(1); 81 udelay(1);
81} 82}
82 83
83static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) 84static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
84{ 85{
85 u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance); 86 u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
86 u32 ctrl; 87 u32 ctrl;
@@ -96,18 +97,68 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
96 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); 97 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
97 writel(ctrl, priv->raminit_ctrlreg); 98 writel(ctrl, priv->raminit_ctrlreg);
98 ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); 99 ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
99 c_can_hw_raminit_wait(priv, ctrl, mask); 100 c_can_hw_raminit_wait_ti(priv, ctrl, mask);
100 101
101 if (enable) { 102 if (enable) {
102 /* Set start bit and wait for the done bit. */ 103 /* Set start bit and wait for the done bit. */
103 ctrl |= CAN_RAMINIT_START_MASK(priv->instance); 104 ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
104 writel(ctrl, priv->raminit_ctrlreg); 105 writel(ctrl, priv->raminit_ctrlreg);
105 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); 106 ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
106 c_can_hw_raminit_wait(priv, ctrl, mask); 107 c_can_hw_raminit_wait_ti(priv, ctrl, mask);
107 } 108 }
108 spin_unlock(&raminit_lock); 109 spin_unlock(&raminit_lock);
109} 110}
110 111
112static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
113{
114 u32 val;
115
116 val = priv->read_reg(priv, index);
117 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
118
119 return val;
120}
121
122static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
123 u32 val)
124{
125 priv->write_reg(priv, index + 1, val >> 16);
126 priv->write_reg(priv, index, val);
127}
128
129static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
130{
131 return readl(priv->base + priv->regs[index]);
132}
133
134static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
135 u32 val)
136{
137 writel(val, priv->base + priv->regs[index]);
138}
139
140static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask)
141{
142 while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask)
143 udelay(1);
144}
145
146static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
147{
148 u32 ctrl;
149
150 ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG);
151 ctrl &= ~DCAN_RAM_INIT_BIT;
152 priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
153 c_can_hw_raminit_wait(priv, ctrl);
154
155 if (enable) {
156 ctrl |= DCAN_RAM_INIT_BIT;
157 priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
158 c_can_hw_raminit_wait(priv, ctrl);
159 }
160}
161
111static struct platform_device_id c_can_id_table[] = { 162static struct platform_device_id c_can_id_table[] = {
112 [BOSCH_C_CAN_PLATFORM] = { 163 [BOSCH_C_CAN_PLATFORM] = {
113 .name = KBUILD_MODNAME, 164 .name = KBUILD_MODNAME,
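
Where no TI-style raminit control region is mapped, the new c_can_hw_raminit() drives RAM initialization through the D_CAN FUNCTION register instead: clear DCAN_RAM_INIT_BIT and poll, then set it and poll again. A condensed sketch of the handshake (read_reg32/write_reg32 stand in for the priv accessors):

        ctrl = read_reg32(C_CAN_FUNCTION_REG);
        ctrl &= ~DCAN_RAM_INIT_BIT;
        write_reg32(C_CAN_FUNCTION_REG, ctrl);          /* clear, then wait */
        if (enable) {
                ctrl |= DCAN_RAM_INIT_BIT;
                write_reg32(C_CAN_FUNCTION_REG, ctrl);  /* start, then wait */
        }
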
@@ -201,11 +252,15 @@ static int c_can_plat_probe(struct platform_device *pdev)
201 case IORESOURCE_MEM_32BIT: 252 case IORESOURCE_MEM_32BIT:
202 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit; 253 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
203 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit; 254 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
255 priv->read_reg32 = c_can_plat_read_reg32;
256 priv->write_reg32 = c_can_plat_write_reg32;
204 break; 257 break;
205 case IORESOURCE_MEM_16BIT: 258 case IORESOURCE_MEM_16BIT:
206 default: 259 default:
207 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; 260 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
208 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; 261 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
262 priv->read_reg32 = c_can_plat_read_reg32;
263 priv->write_reg32 = c_can_plat_write_reg32;
209 break; 264 break;
210 } 265 }
211 break; 266 break;
@@ -214,6 +269,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
214 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; 269 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
215 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; 270 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
216 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; 271 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
272 priv->read_reg32 = d_can_plat_read_reg32;
273 priv->write_reg32 = d_can_plat_write_reg32;
217 274
218 if (pdev->dev.of_node) 275 if (pdev->dev.of_node)
219 priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can"); 276 priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
@@ -221,11 +278,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
221 priv->instance = pdev->id; 278 priv->instance = pdev->id;
222 279
223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 280 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
281 /* Not all D_CAN modules have a separate register for the D_CAN
282 * RAM initialization. Use default RAM init bit in D_CAN module
283 * if not specified in DT.
284 */
285 if (!res) {
286 priv->raminit = c_can_hw_raminit;
287 break;
288 }
289
224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 290 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
225 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) 291 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
226 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 292 dev_info(&pdev->dev, "control memory is not used for raminit\n");
227 else 293 else
228 priv->raminit = c_can_hw_raminit; 294 priv->raminit = c_can_hw_raminit_ti;
229 break; 295 break;
230 default: 296 default:
231 ret = -EINVAL; 297 ret = -EINVAL;
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index f19be5269e7b..81c711719490 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
1config CAN_MSCAN 1config CAN_MSCAN
2 depends on PPC || M68K 2 depends on PPC
3 tristate "Support for Freescale MSCAN based chips" 3 tristate "Support for Freescale MSCAN based chips"
4 ---help--- 4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition 5 The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
new file mode 100644
index 000000000000..5268d216ecfa
--- /dev/null
+++ b/drivers/net/can/rcar_can.c
@@ -0,0 +1,876 @@
1/* Renesas R-Car CAN device driver
2 *
3 * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
4 * Copyright (C) 2013 Renesas Solutions Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/types.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/netdevice.h>
18#include <linux/platform_device.h>
19#include <linux/can/led.h>
20#include <linux/can/dev.h>
21#include <linux/clk.h>
22#include <linux/can/platform/rcar_can.h>
23
24#define RCAR_CAN_DRV_NAME "rcar_can"
25
26/* Mailbox configuration:
27 * mailbox 60 - 63 - Rx FIFO mailboxes
28 * mailbox 56 - 59 - Tx FIFO mailboxes
29 * non-FIFO mailboxes are not used
30 */
31#define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */
32#define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */
33#define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */
34#define RCAR_CAN_FIFO_DEPTH 4
35
36/* Mailbox registers structure */
37struct rcar_can_mbox_regs {
38 u32 id; /* IDE and RTR bits, SID and EID */
39 u8 stub; /* Not used */
40 u8 dlc; /* Data Length Code - bits [0..3] */
41 u8 data[8]; /* Data Bytes */
42 u8 tsh; /* Time Stamp Higher Byte */
43 u8 tsl; /* Time Stamp Lower Byte */
44};
45
46struct rcar_can_regs {
47 struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
48 u32 mkr_2_9[8]; /* Mask Registers 2-9 */
49 u32 fidcr[2]; /* FIFO Received ID Compare Register */
50 u32 mkivlr1; /* Mask Invalid Register 1 */
51 u32 mier1; /* Mailbox Interrupt Enable Register 1 */
52 u32 mkr_0_1[2]; /* Mask Registers 0-1 */
 53	u32 mkivlr0;	/* Mask Invalid Register 0 */
54 u32 mier0; /* Mailbox Interrupt Enable Register 0 */
55 u8 pad_440[0x3c0];
56 u8 mctl[64]; /* Message Control Registers */
57 u16 ctlr; /* Control Register */
58 u16 str; /* Status register */
59 u8 bcr[3]; /* Bit Configuration Register */
60 u8 clkr; /* Clock Select Register */
61 u8 rfcr; /* Receive FIFO Control Register */
62 u8 rfpcr; /* Receive FIFO Pointer Control Register */
63 u8 tfcr; /* Transmit FIFO Control Register */
64 u8 tfpcr; /* Transmit FIFO Pointer Control Register */
65 u8 eier; /* Error Interrupt Enable Register */
66 u8 eifr; /* Error Interrupt Factor Judge Register */
67 u8 recr; /* Receive Error Count Register */
68 u8 tecr; /* Transmit Error Count Register */
69 u8 ecsr; /* Error Code Store Register */
70 u8 cssr; /* Channel Search Support Register */
71 u8 mssr; /* Mailbox Search Status Register */
72 u8 msmr; /* Mailbox Search Mode Register */
73 u16 tsr; /* Time Stamp Register */
74 u8 afsr; /* Acceptance Filter Support Register */
75 u8 pad_857;
76 u8 tcr; /* Test Control Register */
77 u8 pad_859[7];
78 u8 ier; /* Interrupt Enable Register */
79 u8 isr; /* Interrupt Status Register */
80 u8 pad_862;
81 u8 mbsmr; /* Mailbox Search Mask Register */
82};
83
84struct rcar_can_priv {
85 struct can_priv can; /* Must be the first member! */
86 struct net_device *ndev;
87 struct napi_struct napi;
88 struct rcar_can_regs __iomem *regs;
89 struct clk *clk;
90 u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
91 u32 tx_head;
92 u32 tx_tail;
93 u8 clock_select;
94 u8 ier;
95};
96
97static const struct can_bittiming_const rcar_can_bittiming_const = {
98 .name = RCAR_CAN_DRV_NAME,
99 .tseg1_min = 4,
100 .tseg1_max = 16,
101 .tseg2_min = 2,
102 .tseg2_max = 8,
103 .sjw_max = 4,
104 .brp_min = 1,
105 .brp_max = 1024,
106 .brp_inc = 1,
107};
108
109/* Control Register bits */
110#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
111#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
112 /* at bus-off entry */
113#define RCAR_CAN_CTLR_SLPM (1 << 10)
114#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
115#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
116#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
117#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
118#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
119#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
120#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
121#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
122
123/* Status Register bits */
124#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
125
126/* FIFO Received ID Compare Registers 0 and 1 bits */
127#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
128#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
129
130/* Receive FIFO Control Register bits */
131#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
132#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
133
134/* Transmit FIFO Control Register bits */
135#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
136 /* Number Status Bits */
137#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
138 /* Message Number Status Bits */
139#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
140
141#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
142 /* for Rx mailboxes 0-31 */
143#define RCAR_CAN_N_RX_MKREGS2 8
144
145/* Bit Configuration Register settings */
146#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
147#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
148#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
149#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
150
151/* Mailbox and Mask Registers bits */
152#define RCAR_CAN_IDE (1 << 31)
153#define RCAR_CAN_RTR (1 << 30)
154#define RCAR_CAN_SID_SHIFT 18
155
156/* Mailbox Interrupt Enable Register 1 bits */
157#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
158#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
159
160/* Interrupt Enable Register bits */
161#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
162#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
163 /* Enable Bit */
164#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
165 /* Enable Bit */
166/* Interrupt Status Register bits */
167#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
168#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
169 /* Status Bit */
170#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
171 /* Status Bit */
172
173/* Error Interrupt Enable Register bits */
174#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
175#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
176 /* Interrupt Enable */
177#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
178#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
179#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
180#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
181#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
182#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
183
184/* Error Interrupt Factor Judge Register bits */
185#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
186#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
187 /* Detect Flag */
188#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
189#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
190#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
191#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
192#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
193#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
194
195/* Error Code Store Register bits */
196#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
197#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
198#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
199#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
200#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
201#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
202#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
203#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
204
205#define RCAR_CAN_NAPI_WEIGHT 4
206#define MAX_STR_READS 0x100
207
208static void tx_failure_cleanup(struct net_device *ndev)
209{
210 int i;
211
212 for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
213 can_free_echo_skb(ndev, i);
214}
215
216static void rcar_can_error(struct net_device *ndev)
217{
218 struct rcar_can_priv *priv = netdev_priv(ndev);
219 struct net_device_stats *stats = &ndev->stats;
220 struct can_frame *cf;
221 struct sk_buff *skb;
222 u8 eifr, txerr = 0, rxerr = 0;
223
224 /* Propagate the error condition to the CAN stack */
225 skb = alloc_can_err_skb(ndev, &cf);
226
227 eifr = readb(&priv->regs->eifr);
228 if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
229 txerr = readb(&priv->regs->tecr);
230 rxerr = readb(&priv->regs->recr);
231 if (skb) {
232 cf->can_id |= CAN_ERR_CRTL;
233 cf->data[6] = txerr;
234 cf->data[7] = rxerr;
235 }
236 }
237 if (eifr & RCAR_CAN_EIFR_BEIF) {
238 int rx_errors = 0, tx_errors = 0;
239 u8 ecsr;
240
241 netdev_dbg(priv->ndev, "Bus error interrupt:\n");
242 if (skb) {
243 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
244 cf->data[2] = CAN_ERR_PROT_UNSPEC;
245 }
246 ecsr = readb(&priv->regs->ecsr);
247 if (ecsr & RCAR_CAN_ECSR_ADEF) {
248 netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
249 tx_errors++;
250 writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
251 if (skb)
252 cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
253 }
254 if (ecsr & RCAR_CAN_ECSR_BE0F) {
255 netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
256 tx_errors++;
257 writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
258 if (skb)
259 cf->data[2] |= CAN_ERR_PROT_BIT0;
260 }
261 if (ecsr & RCAR_CAN_ECSR_BE1F) {
262 netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
263 tx_errors++;
264 writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
265 if (skb)
266 cf->data[2] |= CAN_ERR_PROT_BIT1;
267 }
268 if (ecsr & RCAR_CAN_ECSR_CEF) {
269 netdev_dbg(priv->ndev, "CRC Error\n");
270 rx_errors++;
271 writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
272 if (skb)
273 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
274 }
275 if (ecsr & RCAR_CAN_ECSR_AEF) {
276 netdev_dbg(priv->ndev, "ACK Error\n");
277 tx_errors++;
278 writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
279 if (skb) {
280 cf->can_id |= CAN_ERR_ACK;
281 cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
282 }
283 }
284 if (ecsr & RCAR_CAN_ECSR_FEF) {
285 netdev_dbg(priv->ndev, "Form Error\n");
286 rx_errors++;
287 writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
288 if (skb)
289 cf->data[2] |= CAN_ERR_PROT_FORM;
290 }
291 if (ecsr & RCAR_CAN_ECSR_SEF) {
292 netdev_dbg(priv->ndev, "Stuff Error\n");
293 rx_errors++;
294 writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
295 if (skb)
296 cf->data[2] |= CAN_ERR_PROT_STUFF;
297 }
298
299 priv->can.can_stats.bus_error++;
300 ndev->stats.rx_errors += rx_errors;
301 ndev->stats.tx_errors += tx_errors;
302 writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
303 }
304 if (eifr & RCAR_CAN_EIFR_EWIF) {
305 netdev_dbg(priv->ndev, "Error warning interrupt\n");
306 priv->can.state = CAN_STATE_ERROR_WARNING;
307 priv->can.can_stats.error_warning++;
308 /* Clear interrupt condition */
309 writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
310 if (skb)
311 cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
312 CAN_ERR_CRTL_RX_WARNING;
313 }
314 if (eifr & RCAR_CAN_EIFR_EPIF) {
315 netdev_dbg(priv->ndev, "Error passive interrupt\n");
316 priv->can.state = CAN_STATE_ERROR_PASSIVE;
317 priv->can.can_stats.error_passive++;
318 /* Clear interrupt condition */
319 writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
320 if (skb)
321 cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
322 CAN_ERR_CRTL_RX_PASSIVE;
323 }
324 if (eifr & RCAR_CAN_EIFR_BOEIF) {
325 netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
326 tx_failure_cleanup(ndev);
327 priv->ier = RCAR_CAN_IER_ERSIE;
328 writeb(priv->ier, &priv->regs->ier);
329 priv->can.state = CAN_STATE_BUS_OFF;
330 /* Clear interrupt condition */
331 writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
332 can_bus_off(ndev);
333 if (skb)
334 cf->can_id |= CAN_ERR_BUSOFF;
335 }
336 if (eifr & RCAR_CAN_EIFR_ORIF) {
337 netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
338 ndev->stats.rx_over_errors++;
339 ndev->stats.rx_errors++;
340 writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
341 if (skb) {
342 cf->can_id |= CAN_ERR_CRTL;
343 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
344 }
345 }
346 if (eifr & RCAR_CAN_EIFR_OLIF) {
347 netdev_dbg(priv->ndev,
348 "Overload Frame Transmission error interrupt\n");
349 ndev->stats.rx_over_errors++;
350 ndev->stats.rx_errors++;
351 writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
352 if (skb) {
353 cf->can_id |= CAN_ERR_PROT;
354 cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
355 }
356 }
357
358 if (skb) {
359 stats->rx_packets++;
360 stats->rx_bytes += cf->can_dlc;
361 netif_rx(skb);
362 }
363}
364
365static void rcar_can_tx_done(struct net_device *ndev)
366{
367 struct rcar_can_priv *priv = netdev_priv(ndev);
368 struct net_device_stats *stats = &ndev->stats;
369 u8 isr;
370
371 while (1) {
372 u8 unsent = readb(&priv->regs->tfcr);
373
374 unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
375 RCAR_CAN_TFCR_TFUST_SHIFT;
376 if (priv->tx_head - priv->tx_tail <= unsent)
377 break;
378 stats->tx_packets++;
379 stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
380 RCAR_CAN_FIFO_DEPTH];
381 priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
382 can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
383 priv->tx_tail++;
384 netif_wake_queue(ndev);
385 }
386 /* Clear interrupt */
387 isr = readb(&priv->regs->isr);
388 writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
389 can_led_event(ndev, CAN_LED_EVENT_TX);
390}
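
tx_head and tx_tail are free-running 32-bit counters; their unsigned difference is the number of frames handed to the FIFO but not yet completed, and unsigned wraparound keeps the arithmetic correct. A worked example (values contrived to show the wrap):

        u32 head = 1;                 /* wrapped past 0xffffffff   */
        u32 tail = 0xffffffffu;       /* not yet wrapped           */
        u32 inflight = head - tail;   /* == 2, despite head < tail */

The loop above completes echo skbs while inflight exceeds the unsent count the hardware reports in TFCR.
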
391
392static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
393{
394 struct net_device *ndev = dev_id;
395 struct rcar_can_priv *priv = netdev_priv(ndev);
396 u8 isr;
397
398 isr = readb(&priv->regs->isr);
399 if (!(isr & priv->ier))
400 return IRQ_NONE;
401
402 if (isr & RCAR_CAN_ISR_ERSF)
403 rcar_can_error(ndev);
404
405 if (isr & RCAR_CAN_ISR_TXFF)
406 rcar_can_tx_done(ndev);
407
408 if (isr & RCAR_CAN_ISR_RXFF) {
409 if (napi_schedule_prep(&priv->napi)) {
410 /* Disable Rx FIFO interrupts */
411 priv->ier &= ~RCAR_CAN_IER_RXFIE;
412 writeb(priv->ier, &priv->regs->ier);
413 __napi_schedule(&priv->napi);
414 }
415 }
416
417 return IRQ_HANDLED;
418}
419
420static void rcar_can_set_bittiming(struct net_device *dev)
421{
422 struct rcar_can_priv *priv = netdev_priv(dev);
423 struct can_bittiming *bt = &priv->can.bittiming;
424 u32 bcr;
425
426 bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
427 RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
428 RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
429 /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
430 * All the registers are big-endian but they get byte-swapped on 32-bit
431 * read/write (but not on 8-bit, contrary to the manuals)...
432 */
433 writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
434}
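
Because the 32-bit access is byte-swapped while 8-bit access is not, a single writel() can refresh the 3-byte BCR and the adjacent 1-byte CLKR together: BCR occupies bits 31:8 of the word and the clock select byte bits 7:0. A worked view of the packing (field values illustrative):

        u32 bcr  = RCAR_CAN_BCR_TSEG1(10) | RCAR_CAN_BCR_BPR(3) |
                   RCAR_CAN_BCR_SJW(0)    | RCAR_CAN_BCR_TSEG2(2);
        u32 word = (bcr << 8) | clock_select;   /* [31:8] BCR, [7:0] CLKR */
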
435
436static void rcar_can_start(struct net_device *ndev)
437{
438 struct rcar_can_priv *priv = netdev_priv(ndev);
439 u16 ctlr;
440 int i;
441
442 /* Set controller to known mode:
443 * - FIFO mailbox mode
444 * - accept all messages
445 * - overrun mode
446 * CAN is in sleep mode after MCU hardware or software reset.
447 */
448 ctlr = readw(&priv->regs->ctlr);
449 ctlr &= ~RCAR_CAN_CTLR_SLPM;
450 writew(ctlr, &priv->regs->ctlr);
451 /* Go to reset mode */
452 ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
453 writew(ctlr, &priv->regs->ctlr);
454 for (i = 0; i < MAX_STR_READS; i++) {
455 if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
456 break;
457 }
458 rcar_can_set_bittiming(ndev);
459 ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
460 ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
461 /* at bus-off */
462 ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
463 ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
464 writew(ctlr, &priv->regs->ctlr);
465
466 /* Accept all SID and EID */
467 writel(0, &priv->regs->mkr_2_9[6]);
468 writel(0, &priv->regs->mkr_2_9[7]);
469 /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
470 writel(0, &priv->regs->mkivlr1);
471 /* Accept all frames */
472 writel(0, &priv->regs->fidcr[0]);
473 writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
474 /* Enable and configure FIFO mailbox interrupts */
475 writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
476
477 priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
478 RCAR_CAN_IER_TXFIE;
479 writeb(priv->ier, &priv->regs->ier);
480
481 /* Accumulate error codes */
482 writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
483 /* Enable error interrupts */
484 writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
485 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
486 RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
487 RCAR_CAN_EIER_OLIE, &priv->regs->eier);
488 priv->can.state = CAN_STATE_ERROR_ACTIVE;
489
490 /* Go to operation mode */
491 writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
492 for (i = 0; i < MAX_STR_READS; i++) {
493 if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
494 break;
495 }
496 /* Enable Rx and Tx FIFO */
497 writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
498 writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
499}
500
501static int rcar_can_open(struct net_device *ndev)
502{
503 struct rcar_can_priv *priv = netdev_priv(ndev);
504 int err;
505
506 err = clk_prepare_enable(priv->clk);
507 if (err) {
508 netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
509 err);
510 goto out;
511 }
512 err = open_candev(ndev);
513 if (err) {
514 netdev_err(ndev, "open_candev() failed, error %d\n", err);
515 goto out_clock;
516 }
517 napi_enable(&priv->napi);
518 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
519 if (err) {
520 netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
521 goto out_close;
522 }
523 can_led_event(ndev, CAN_LED_EVENT_OPEN);
524 rcar_can_start(ndev);
525 netif_start_queue(ndev);
526 return 0;
527out_close:
528 napi_disable(&priv->napi);
529 close_candev(ndev);
530out_clock:
531 clk_disable_unprepare(priv->clk);
532out:
533 return err;
534}
535
536static void rcar_can_stop(struct net_device *ndev)
537{
538 struct rcar_can_priv *priv = netdev_priv(ndev);
539 u16 ctlr;
540 int i;
541
542 /* Go to (force) reset mode */
543 ctlr = readw(&priv->regs->ctlr);
544 ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
545 writew(ctlr, &priv->regs->ctlr);
546 for (i = 0; i < MAX_STR_READS; i++) {
547 if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
548 break;
549 }
550 writel(0, &priv->regs->mier0);
551 writel(0, &priv->regs->mier1);
552 writeb(0, &priv->regs->ier);
553 writeb(0, &priv->regs->eier);
554 /* Go to sleep mode */
555 ctlr |= RCAR_CAN_CTLR_SLPM;
556 writew(ctlr, &priv->regs->ctlr);
557 priv->can.state = CAN_STATE_STOPPED;
558}
559
560static int rcar_can_close(struct net_device *ndev)
561{
562 struct rcar_can_priv *priv = netdev_priv(ndev);
563
564 netif_stop_queue(ndev);
565 rcar_can_stop(ndev);
566 free_irq(ndev->irq, ndev);
567 napi_disable(&priv->napi);
568 clk_disable_unprepare(priv->clk);
569 close_candev(ndev);
570 can_led_event(ndev, CAN_LED_EVENT_STOP);
571 return 0;
572}
573
574static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
575 struct net_device *ndev)
576{
577 struct rcar_can_priv *priv = netdev_priv(ndev);
578 struct can_frame *cf = (struct can_frame *)skb->data;
579 u32 data, i;
580
581 if (can_dropped_invalid_skb(ndev, skb))
582 return NETDEV_TX_OK;
583
584 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
585 data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
586 else /* Standard frame format */
587 data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
588
589 if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
590 data |= RCAR_CAN_RTR;
591 } else {
592 for (i = 0; i < cf->can_dlc; i++)
593 writeb(cf->data[i],
594 &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
595 }
596
597 writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
598
599 writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
600
601 priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
602 can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
603 priv->tx_head++;
604 /* Start Tx: write 0xff to the TFPCR register to increment
605 * the CPU-side pointer for the transmit FIFO to the next
606 * mailbox location
607 */
608 writeb(0xff, &priv->regs->tfpcr);
609 /* Stop the queue if we've filled all FIFO entries */
610 if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
611 netif_stop_queue(ndev);
612
613 return NETDEV_TX_OK;
614}
615
616static const struct net_device_ops rcar_can_netdev_ops = {
617 .ndo_open = rcar_can_open,
618 .ndo_stop = rcar_can_close,
619 .ndo_start_xmit = rcar_can_start_xmit,
620};
621
622static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
623{
624 struct net_device_stats *stats = &priv->ndev->stats;
625 struct can_frame *cf;
626 struct sk_buff *skb;
627 u32 data;
628 u8 dlc;
629
630 skb = alloc_can_skb(priv->ndev, &cf);
631 if (!skb) {
632 stats->rx_dropped++;
633 return;
634 }
635
636 data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
637 if (data & RCAR_CAN_IDE)
638 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
639 else
640 cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
641
642 dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
643 cf->can_dlc = get_can_dlc(dlc);
644 if (data & RCAR_CAN_RTR) {
645 cf->can_id |= CAN_RTR_FLAG;
646 } else {
647 for (dlc = 0; dlc < cf->can_dlc; dlc++)
648 cf->data[dlc] =
649 readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
650 }
651
652 can_led_event(priv->ndev, CAN_LED_EVENT_RX);
653
654 stats->rx_bytes += cf->can_dlc;
655 stats->rx_packets++;
656 netif_receive_skb(skb);
657}
658
659static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
660{
661 struct rcar_can_priv *priv = container_of(napi,
662 struct rcar_can_priv, napi);
663 int num_pkts;
664
665 for (num_pkts = 0; num_pkts < quota; num_pkts++) {
666 u8 rfcr, isr;
667
668 isr = readb(&priv->regs->isr);
669 /* Clear interrupt bit */
670 if (isr & RCAR_CAN_ISR_RXFF)
671 writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
672 rfcr = readb(&priv->regs->rfcr);
673 if (rfcr & RCAR_CAN_RFCR_RFEST)
674 break;
675 rcar_can_rx_pkt(priv);
676 /* Write 0xff to the RFPCR register to increment
677 * the CPU-side pointer for the receive FIFO
678 * to the next mailbox location
679 */
680 writeb(0xff, &priv->regs->rfpcr);
681 }
682 /* All packets processed */
683 if (num_pkts < quota) {
684 napi_complete(napi);
685 priv->ier |= RCAR_CAN_IER_RXFIE;
686 writeb(priv->ier, &priv->regs->ier);
687 }
688 return num_pkts;
689}
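
rcar_can_rx_poll follows the standard NAPI contract: the RX FIFO interrupt stays masked while polling, and it is re-armed only when fewer than quota packets were processed. The generic shape of that contract, sketched with illustrative helpers (hw_has_packet(), process_one_packet() and hw_enable_rx_irq() are placeholders, not driver functions):

#include <linux/netdevice.h>

extern bool hw_has_packet(void);
extern int process_one_packet(void);
extern void hw_enable_rx_irq(void);

static int example_poll(struct napi_struct *napi, int quota)
{
	int done = 0;

	while (done < quota && hw_has_packet())
		done += process_one_packet();

	/* Budget not exhausted: polling is complete, re-arm the IRQ.
	 * Returning quota instead tells the core to poll us again.
	 */
	if (done < quota) {
		napi_complete(napi);
		hw_enable_rx_irq();
	}
	return done;
}
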
690
691static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
692{
693 switch (mode) {
694 case CAN_MODE_START:
695 rcar_can_start(ndev);
696 netif_wake_queue(ndev);
697 return 0;
698 default:
699 return -EOPNOTSUPP;
700 }
701}
702
703static int rcar_can_get_berr_counter(const struct net_device *dev,
704 struct can_berr_counter *bec)
705{
706 struct rcar_can_priv *priv = netdev_priv(dev);
707 int err;
708
709 err = clk_prepare_enable(priv->clk);
710 if (err)
711 return err;
712 bec->txerr = readb(&priv->regs->tecr);
713 bec->rxerr = readb(&priv->regs->recr);
714 clk_disable_unprepare(priv->clk);
715 return 0;
716}
717
718static int rcar_can_probe(struct platform_device *pdev)
719{
720 struct rcar_can_platform_data *pdata;
721 struct rcar_can_priv *priv;
722 struct net_device *ndev;
723 struct resource *mem;
724 void __iomem *addr;
725 int err = -ENODEV;
726 int irq;
727
728 pdata = dev_get_platdata(&pdev->dev);
729 if (!pdata) {
730 dev_err(&pdev->dev, "No platform data provided!\n");
731 goto fail;
732 }
733
734 irq = platform_get_irq(pdev, 0);
735 if (irq < 0) {
736 dev_err(&pdev->dev, "No IRQ resource\n");
737 goto fail;
738 }
739
740 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
741 addr = devm_ioremap_resource(&pdev->dev, mem);
742 if (IS_ERR(addr)) {
743 err = PTR_ERR(addr);
744 goto fail;
745 }
746
747 ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
748 if (!ndev) {
749 dev_err(&pdev->dev, "alloc_candev() failed\n");
750 err = -ENOMEM;
751 goto fail;
752 }
753
754 priv = netdev_priv(ndev);
755
756 priv->clk = devm_clk_get(&pdev->dev, NULL);
757 if (IS_ERR(priv->clk)) {
758 err = PTR_ERR(priv->clk);
759 dev_err(&pdev->dev, "cannot get clock: %d\n", err);
760 goto fail_clk;
761 }
762
763 ndev->netdev_ops = &rcar_can_netdev_ops;
764 ndev->irq = irq;
765 ndev->flags |= IFF_ECHO;
766 priv->ndev = ndev;
767 priv->regs = addr;
768 priv->clock_select = pdata->clock_select;
769 priv->can.clock.freq = clk_get_rate(priv->clk);
770 priv->can.bittiming_const = &rcar_can_bittiming_const;
771 priv->can.do_set_mode = rcar_can_do_set_mode;
772 priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
773 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
774 platform_set_drvdata(pdev, ndev);
775 SET_NETDEV_DEV(ndev, &pdev->dev);
776
777 netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
778 RCAR_CAN_NAPI_WEIGHT);
779 err = register_candev(ndev);
780 if (err) {
781 dev_err(&pdev->dev, "register_candev() failed, error %d\n",
782 err);
783 goto fail_candev;
784 }
785
786 devm_can_led_init(ndev);
787
788 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
789 priv->regs, ndev->irq);
790
791 return 0;
792fail_candev:
793 netif_napi_del(&priv->napi);
794fail_clk:
795 free_candev(ndev);
796fail:
797 return err;
798}
799
800static int rcar_can_remove(struct platform_device *pdev)
801{
802 struct net_device *ndev = platform_get_drvdata(pdev);
803 struct rcar_can_priv *priv = netdev_priv(ndev);
804
805 unregister_candev(ndev);
806 netif_napi_del(&priv->napi);
807 free_candev(ndev);
808 return 0;
809}
810
811static int __maybe_unused rcar_can_suspend(struct device *dev)
812{
813 struct net_device *ndev = dev_get_drvdata(dev);
814 struct rcar_can_priv *priv = netdev_priv(ndev);
815 u16 ctlr;
816
817 if (netif_running(ndev)) {
818 netif_stop_queue(ndev);
819 netif_device_detach(ndev);
820 }
821 ctlr = readw(&priv->regs->ctlr);
822 ctlr |= RCAR_CAN_CTLR_CANM_HALT;
823 writew(ctlr, &priv->regs->ctlr);
824 ctlr |= RCAR_CAN_CTLR_SLPM;
825 writew(ctlr, &priv->regs->ctlr);
826 priv->can.state = CAN_STATE_SLEEPING;
827
828 clk_disable(priv->clk);
829 return 0;
830}
831
832static int __maybe_unused rcar_can_resume(struct device *dev)
833{
834 struct net_device *ndev = dev_get_drvdata(dev);
835 struct rcar_can_priv *priv = netdev_priv(ndev);
836 u16 ctlr;
837 int err;
838
839 err = clk_enable(priv->clk);
840 if (err) {
841 netdev_err(ndev, "clk_enable() failed, error %d\n", err);
842 return err;
843 }
844
845 ctlr = readw(&priv->regs->ctlr);
846 ctlr &= ~RCAR_CAN_CTLR_SLPM;
847 writew(ctlr, &priv->regs->ctlr);
848 ctlr &= ~RCAR_CAN_CTLR_CANM;
849 writew(ctlr, &priv->regs->ctlr);
850 priv->can.state = CAN_STATE_ERROR_ACTIVE;
851
852 if (netif_running(ndev)) {
853 netif_device_attach(ndev);
854 netif_start_queue(ndev);
855 }
856 return 0;
857}
858
859static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
860
861static struct platform_driver rcar_can_driver = {
862 .driver = {
863 .name = RCAR_CAN_DRV_NAME,
864 .owner = THIS_MODULE,
865 .pm = &rcar_can_pm_ops,
866 },
867 .probe = rcar_can_probe,
868 .remove = rcar_can_remove,
869};
870
871module_platform_driver(rcar_can_driver);
872
873MODULE_AUTHOR("Cogent Embedded, Inc.");
874MODULE_LICENSE("GPL");
875MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
876MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7d8c8f3672dd..bacd236ce306 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -556,15 +556,6 @@ failed:
556/* 556/*
557 * netdev sysfs 557 * netdev sysfs
558 */ 558 */
559static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
560 char *buf)
561{
562 struct net_device *ndev = to_net_dev(dev);
563 struct softing_priv *priv = netdev2softing(ndev);
564
565 return sprintf(buf, "%i\n", priv->index);
566}
567
568static ssize_t show_chip(struct device *dev, struct device_attribute *attr, 559static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
569 char *buf) 560 char *buf)
570{ 561{
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
609 return count; 600 return count;
610} 601}
611 602
612static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
613static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL); 603static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
614static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output); 604static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
615 605
616static const struct attribute *const netdev_sysfs_attrs[] = { 606static const struct attribute *const netdev_sysfs_attrs[] = {
617 &dev_attr_channel.attr,
618 &dev_attr_chip.attr, 607 &dev_attr_chip.attr,
619 &dev_attr_output.attr, 608 &dev_attr_output.attr,
620 NULL, 609 NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
679{ 668{
680 int ret; 669 int ret;
681 670
682 netdev->sysfs_groups[0] = &netdev_sysfs_group;
683 ret = register_candev(netdev); 671 ret = register_candev(netdev);
684 if (ret) { 672 if (ret) {
685 dev_alert(&netdev->dev, "register failed\n"); 673 dev_alert(&netdev->dev, "register failed\n");
686 return ret; 674 return ret;
687 } 675 }
676 if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
677 netdev_alert(netdev, "sysfs group failed\n");
678
688 return 0; 679 return 0;
689} 680}
690 681
691static void softing_netdev_cleanup(struct net_device *netdev) 682static void softing_netdev_cleanup(struct net_device *netdev)
692{ 683{
684 sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
693 unregister_candev(netdev); 685 unregister_candev(netdev);
694 free_candev(netdev); 686 free_candev(netdev);
695} 687}
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
721DEV_ATTR_RO_STR(hardware, pdat->name); 713DEV_ATTR_RO_STR(hardware, pdat->name);
722DEV_ATTR_RO(hardware_version, id.hw_version); 714DEV_ATTR_RO(hardware_version, id.hw_version);
723DEV_ATTR_RO(license, id.license); 715DEV_ATTR_RO(license, id.license);
724DEV_ATTR_RO(frequency, id.freq);
725DEV_ATTR_RO(txpending, tx.pending);
726 716
727static struct attribute *softing_pdev_attrs[] = { 717static struct attribute *softing_pdev_attrs[] = {
728 &dev_attr_serial.attr, 718 &dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
731 &dev_attr_hardware.attr, 721 &dev_attr_hardware.attr,
732 &dev_attr_hardware_version.attr, 722 &dev_attr_hardware_version.attr,
733 &dev_attr_license.attr, 723 &dev_attr_license.attr,
734 &dev_attr_frequency.attr,
735 &dev_attr_txpending.attr,
736 NULL, 724 NULL,
737}; 725};
738 726
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644
index 000000000000..148cae5871a6
--- /dev/null
+++ b/drivers/net/can/spi/Kconfig
@@ -0,0 +1,10 @@
1menu "CAN SPI interfaces"
2 depends on SPI
3
4config CAN_MCP251X
5 tristate "Microchip MCP251x SPI CAN controllers"
6 depends on HAS_DMA
7 ---help---
8 Driver for the Microchip MCP251x SPI CAN controllers.
9
10endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644
index 000000000000..90bcacffbc65
--- /dev/null
+++ b/drivers/net/can/spi/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Linux Controller Area Network SPI drivers.
3#
4
5
6obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
7
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 28c11f815245..5df239e68812 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -214,6 +214,8 @@
214 214
215#define TX_ECHO_SKB_MAX 1 215#define TX_ECHO_SKB_MAX 1
216 216
217#define MCP251X_OST_DELAY_MS (5)
218
217#define DEVICE_NAME "mcp251x" 219#define DEVICE_NAME "mcp251x"
218 220
219static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */ 221static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
@@ -624,50 +626,45 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
624static int mcp251x_hw_reset(struct spi_device *spi) 626static int mcp251x_hw_reset(struct spi_device *spi)
625{ 627{
626 struct mcp251x_priv *priv = spi_get_drvdata(spi); 628 struct mcp251x_priv *priv = spi_get_drvdata(spi);
629 u8 reg;
627 int ret; 630 int ret;
628 unsigned long timeout; 631
632 /* Wait for oscillator startup timer after power up */
633 mdelay(MCP251X_OST_DELAY_MS);
629 634
630 priv->spi_tx_buf[0] = INSTRUCTION_RESET; 635 priv->spi_tx_buf[0] = INSTRUCTION_RESET;
631 ret = spi_write(spi, priv->spi_tx_buf, 1); 636 ret = mcp251x_spi_trans(spi, 1);
632 if (ret) { 637 if (ret)
633 dev_err(&spi->dev, "reset failed: ret = %d\n", ret); 638 return ret;
634 return -EIO; 639
635 } 640 /* Wait for oscillator startup timer after reset */
641 mdelay(MCP251X_OST_DELAY_MS);
642
643 reg = mcp251x_read_reg(spi, CANSTAT);
644 if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
645 return -ENODEV;
636 646
637 /* Wait for reset to finish */
638 timeout = jiffies + HZ;
639 mdelay(10);
640 while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
641 != CANCTRL_REQOP_CONF) {
642 schedule();
643 if (time_after(jiffies, timeout)) {
644 dev_err(&spi->dev, "MCP251x didn't"
645 " enter in conf mode after reset\n");
646 return -EBUSY;
647 }
648 }
649 return 0; 647 return 0;
650} 648}
651 649
652static int mcp251x_hw_probe(struct spi_device *spi) 650static int mcp251x_hw_probe(struct spi_device *spi)
653{ 651{
654 int st1, st2; 652 u8 ctrl;
653 int ret;
655 654
656 mcp251x_hw_reset(spi); 655 ret = mcp251x_hw_reset(spi);
656 if (ret)
657 return ret;
657 658
658 /* 659 ctrl = mcp251x_read_reg(spi, CANCTRL);
659 * Please note that these are "magic values" based on after 660
660 * reset defaults taken from data sheet which allows us to see 661 dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
661 * if we really have a chip on the bus (we avoid common all
662 * zeroes or all ones situations)
663 */
664 st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
665 st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
666 662
667 dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2); 663 /* Check for power up default value */
664 if ((ctrl & 0x17) != 0x07)
665 return -ENODEV;
668 666
669 /* Check for power up default values */ 667 return 0;
670 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
671} 668}
672 669
673static int mcp251x_power_enable(struct regulator *reg, int enable) 670static int mcp251x_power_enable(struct regulator *reg, int enable)
@@ -776,7 +773,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
776 773
777 mutex_lock(&priv->mcp_lock); 774 mutex_lock(&priv->mcp_lock);
778 if (priv->after_suspend) { 775 if (priv->after_suspend) {
779 mdelay(10);
780 mcp251x_hw_reset(spi); 776 mcp251x_hw_reset(spi);
781 mcp251x_setup(net, priv, spi); 777 mcp251x_setup(net, priv, spi);
782 if (priv->after_suspend & AFTER_SUSPEND_RESTART) { 778 if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
@@ -955,7 +951,7 @@ static int mcp251x_open(struct net_device *net)
955 priv->tx_len = 0; 951 priv->tx_len = 0;
956 952
957 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, 953 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
958 flags, DEVICE_NAME, priv); 954 flags | IRQF_ONESHOT, DEVICE_NAME, priv);
959 if (ret) { 955 if (ret) {
960 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); 956 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
961 mcp251x_power_enable(priv->transceiver, 0); 957 mcp251x_power_enable(priv->transceiver, 0);
@@ -1032,8 +1028,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
1032 struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev); 1028 struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
1033 struct net_device *net; 1029 struct net_device *net;
1034 struct mcp251x_priv *priv; 1030 struct mcp251x_priv *priv;
1035 int freq, ret = -ENODEV;
1036 struct clk *clk; 1031 struct clk *clk;
1032 int freq, ret;
1037 1033
1038 clk = devm_clk_get(&spi->dev, NULL); 1034 clk = devm_clk_get(&spi->dev, NULL);
1039 if (IS_ERR(clk)) { 1035 if (IS_ERR(clk)) {
@@ -1076,6 +1072,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
1076 priv->net = net; 1072 priv->net = net;
1077 priv->clk = clk; 1073 priv->clk = clk;
1078 1074
1075 spi_set_drvdata(spi, priv);
1076
1077 /* Configure the SPI bus */
1078 spi->bits_per_word = 8;
1079 if (mcp251x_is_2510(spi))
1080 spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
1081 else
1082 spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
1083 ret = spi_setup(spi);
1084 if (ret)
1085 goto out_clk;
1086
1079 priv->power = devm_regulator_get(&spi->dev, "vdd"); 1087 priv->power = devm_regulator_get(&spi->dev, "vdd");
1080 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1088 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
1081 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1089 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
@@ -1088,8 +1096,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
1088 if (ret) 1096 if (ret)
1089 goto out_clk; 1097 goto out_clk;
1090 1098
1091 spi_set_drvdata(spi, priv);
1092
1093 priv->spi = spi; 1099 priv->spi = spi;
1094 mutex_init(&priv->mcp_lock); 1100 mutex_init(&priv->mcp_lock);
1095 1101
@@ -1134,20 +1140,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
1134 1140
1135 SET_NETDEV_DEV(net, &spi->dev); 1141 SET_NETDEV_DEV(net, &spi->dev);
1136 1142
1137 /* Configure the SPI bus */
1138 spi->mode = spi->mode ? : SPI_MODE_0;
1139 if (mcp251x_is_2510(spi))
1140 spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
1141 else
1142 spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
1143 spi->bits_per_word = 8;
1144 spi_setup(spi);
1145
1146 /* Here is OK to not lock the MCP, no one knows about it yet */ 1143 /* Here is OK to not lock the MCP, no one knows about it yet */
1147 if (!mcp251x_hw_probe(spi)) { 1144 ret = mcp251x_hw_probe(spi);
1148 ret = -ENODEV; 1145 if (ret)
1149 goto error_probe; 1146 goto error_probe;
1150 } 1147
1151 mcp251x_hw_sleep(spi); 1148 mcp251x_hw_sleep(spi);
1152 1149
1153 ret = register_candev(net); 1150 ret = register_candev(net);
@@ -1156,7 +1153,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
1156 1153
1157 devm_can_led_init(net); 1154 devm_can_led_init(net);
1158 1155
1159 return ret; 1156 return 0;
1160 1157
1161error_probe: 1158error_probe:
1162 if (mcp251x_enable_dma) 1159 if (mcp251x_enable_dma)
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index fc96a3d83ebe..a77db919363c 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,13 +13,21 @@ config CAN_ESD_USB2
13 This driver supports the CAN-USB/2 interface 13 This driver supports the CAN-USB/2 interface
14 from esd electronic system design gmbh (http://www.esd.eu). 14 from esd electronic system design gmbh (http://www.esd.eu).
15 15
16config CAN_GS_USB
17 tristate "Geschwister Schneider UG interfaces"
18 ---help---
19 This driver supports the Geschwister Schneider USB/CAN devices.
20 If unsure, choose N. Choose Y for built-in support,
21 or M to compile as a module (the module will be
22 named gs_usb).
23
16config CAN_KVASER_USB 24config CAN_KVASER_USB
17 tristate "Kvaser CAN/USB interface" 25 tristate "Kvaser CAN/USB interface"
18 ---help--- 26 ---help---
19 This driver adds support for Kvaser CAN/USB devices like Kvaser 27 This driver adds support for Kvaser CAN/USB devices like Kvaser
20 Leaf Light. 28 Leaf Light.
21 29
22 The driver gives support for the following devices: 30 The driver provides support for the following devices:
23 - Kvaser Leaf Light 31 - Kvaser Leaf Light
24 - Kvaser Leaf Professional HS 32 - Kvaser Leaf Professional HS
25 - Kvaser Leaf SemiPro HS 33 - Kvaser Leaf SemiPro HS
@@ -36,6 +44,8 @@ config CAN_KVASER_USB
36 - Kvaser Leaf Light "China" 44 - Kvaser Leaf Light "China"
37 - Kvaser BlackBird SemiPro 45 - Kvaser BlackBird SemiPro
38 - Kvaser USBcan R 46 - Kvaser USBcan R
47 - Kvaser Leaf Light v2
48 - Kvaser Mini PCI Express HS
39 49
40 If unsure, say N. 50 If unsure, say N.
41 51
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index becef460a91a..7b9a393b1ac8 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
6obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o 6obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
7obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
7obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o 8obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
8obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ 9obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
9obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o 10obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
new file mode 100644
index 000000000000..04b0f84612f0
--- /dev/null
+++ b/drivers/net/can/usb/gs_usb.c
@@ -0,0 +1,971 @@
1/* CAN driver for Geschwister Schneider USB/CAN devices.
2 *
3 * Copyright (C) 2013 Geschwister Schneider Technologie-,
4 * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
5 *
6 * Many thanks to all socketcan devs!
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published
10 * by the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <linux/init.h>
19#include <linux/signal.h>
20#include <linux/module.h>
21#include <linux/netdevice.h>
22#include <linux/usb.h>
23
24#include <linux/can.h>
25#include <linux/can/dev.h>
26#include <linux/can/error.h>
27
28/* Device specific constants */
29#define USB_GSUSB_1_VENDOR_ID 0x1d50
30#define USB_GSUSB_1_PRODUCT_ID 0x606f
31
32#define GSUSB_ENDPOINT_IN 1
33#define GSUSB_ENDPOINT_OUT 2
34
35/* Device specific constants */
36enum gs_usb_breq {
37 GS_USB_BREQ_HOST_FORMAT = 0,
38 GS_USB_BREQ_BITTIMING,
39 GS_USB_BREQ_MODE,
40 GS_USB_BREQ_BERR,
41 GS_USB_BREQ_BT_CONST,
42 GS_USB_BREQ_DEVICE_CONFIG
43};
44
45enum gs_can_mode {
46 /* reset a channel. turns it off */
47 GS_CAN_MODE_RESET = 0,
48 /* starts a channel */
49 GS_CAN_MODE_START
50};
51
52enum gs_can_state {
53 GS_CAN_STATE_ERROR_ACTIVE = 0,
54 GS_CAN_STATE_ERROR_WARNING,
55 GS_CAN_STATE_ERROR_PASSIVE,
56 GS_CAN_STATE_BUS_OFF,
57 GS_CAN_STATE_STOPPED,
58 GS_CAN_STATE_SLEEPING
59};
60
61/* data types passed between host and device */
62struct gs_host_config {
63 u32 byte_order;
64} __packed;
65/* All data between host and device is exchanged in host byte order,
66 * thanks to the struct gs_host_config byte_order member, which is sent first
67 * to indicate the desired byte order.
68 */
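
This handshake works because 0x0000beef has a distinct byte pattern in each byte order, so the firmware can infer the host's endianness from the raw bytes of the first control transfer. A sketch of the device-side check under that assumption (host_is_same_endian() is illustrative; it is part of neither this driver nor any published firmware):

#include <stdint.h>
#include <string.h>

static int host_is_same_endian(const uint8_t raw[4])
{
	uint32_t native = 0x0000beef;

	/* The host sent 0x0000beef in its own byte order; if the raw
	 * bytes match our native encoding, no swapping is needed.
	 */
	return memcmp(raw, &native, sizeof(native)) == 0;
}
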
69
70struct gs_device_config {
71 u8 reserved1;
72 u8 reserved2;
73 u8 reserved3;
74 u8 icount;
75 u32 sw_version;
76 u32 hw_version;
77} __packed;
78
79#define GS_CAN_MODE_NORMAL 0
80#define GS_CAN_MODE_LISTEN_ONLY (1<<0)
81#define GS_CAN_MODE_LOOP_BACK (1<<1)
82#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2)
83#define GS_CAN_MODE_ONE_SHOT (1<<3)
84
85struct gs_device_mode {
86 u32 mode;
87 u32 flags;
88} __packed;
89
90struct gs_device_state {
91 u32 state;
92 u32 rxerr;
93 u32 txerr;
94} __packed;
95
96struct gs_device_bittiming {
97 u32 prop_seg;
98 u32 phase_seg1;
99 u32 phase_seg2;
100 u32 sjw;
101 u32 brp;
102} __packed;
103
104#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0)
105#define GS_CAN_FEATURE_LOOP_BACK (1<<1)
106#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2)
107#define GS_CAN_FEATURE_ONE_SHOT (1<<3)
108
109struct gs_device_bt_const {
110 u32 feature;
111 u32 fclk_can;
112 u32 tseg1_min;
113 u32 tseg1_max;
114 u32 tseg2_min;
115 u32 tseg2_max;
116 u32 sjw_max;
117 u32 brp_min;
118 u32 brp_max;
119 u32 brp_inc;
120} __packed;
121
122#define GS_CAN_FLAG_OVERFLOW 1
123
124struct gs_host_frame {
125 u32 echo_id;
126 u32 can_id;
127
128 u8 can_dlc;
129 u8 channel;
130 u8 flags;
131 u8 reserved;
132
133 u8 data[8];
134} __packed;
135/* The GS USB devices make use of the same flags and masks as in
136 * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
137 */
138
139/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
140#define GS_MAX_TX_URBS 10
141/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
142#define GS_MAX_RX_URBS 30
143/* Maximum number of interfaces the driver supports per device.
144 * Current hardware supports only 2 interfaces; future hardware may support more.
145 */
146#define GS_MAX_INTF 2
147
148struct gs_tx_context {
149 struct gs_can *dev;
150 unsigned int echo_id;
151};
152
153struct gs_can {
154 struct can_priv can; /* must be the first member */
155
156 struct gs_usb *parent;
157
158 struct net_device *netdev;
159 struct usb_device *udev;
160 struct usb_interface *iface;
161
162 struct can_bittiming_const bt_const;
163 unsigned int channel; /* channel number */
164
165 /* This lock prevents a race condition between xmit and receive. */
166 spinlock_t tx_ctx_lock;
167 struct gs_tx_context tx_context[GS_MAX_TX_URBS];
168
169 struct usb_anchor tx_submitted;
170 atomic_t active_tx_urbs;
171};
172
173/* usb interface struct */
174struct gs_usb {
175 struct gs_can *canch[GS_MAX_INTF];
176 struct usb_anchor rx_submitted;
177 atomic_t active_channels;
178 struct usb_device *udev;
179};
180
181/* 'allocate' a tx context.
182 * returns a valid tx context or NULL if there is no space.
183 */
184static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
185{
186 int i = 0;
187 unsigned long flags;
188
189 spin_lock_irqsave(&dev->tx_ctx_lock, flags);
190
191 for (; i < GS_MAX_TX_URBS; i++) {
192 if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
193 dev->tx_context[i].echo_id = i;
194 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
195 return &dev->tx_context[i];
196 }
197 }
198
199 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
200 return NULL;
201}
202
203/* releases a tx context
204 */
205static void gs_free_tx_context(struct gs_tx_context *txc)
206{
207 txc->echo_id = GS_MAX_TX_URBS;
208}
209
210/* Get a tx context by id.
211 */
212static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
213{
214 unsigned long flags;
215
216 if (id < GS_MAX_TX_URBS) {
217 spin_lock_irqsave(&dev->tx_ctx_lock, flags);
218 if (dev->tx_context[id].echo_id == id) {
219 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
220 return &dev->tx_context[id];
221 }
222 spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
223 }
224 return NULL;
225}
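
A slot in tx_context[] is free exactly when its echo_id holds the sentinel GS_MAX_TX_URBS, which can never be a valid index; gs_alloc_tx_context() claims a slot under the spinlock by storing the slot's own index, and that index is what travels to the device and back as the echo id. A usage sketch (example_reserve_slot() is illustrative, not driver code):

static int example_reserve_slot(struct gs_can *dev)
{
	struct gs_tx_context *txc = gs_alloc_tx_context(dev);

	if (!txc)
		return -EBUSY; /* all GS_MAX_TX_URBS slots are in flight */

	/* txc->echo_id is the slot index: send it as hf->echo_id, and
	 * gs_get_tx_context(dev, echo_id) finds this slot again when
	 * the device echoes the frame back.
	 */
	gs_free_tx_context(txc); /* stores the sentinel, freeing the slot */
	return 0;
}
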
226
227static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
228{
229 struct gs_device_mode *dm;
230 struct usb_interface *intf = gsdev->iface;
231 int rc;
232
233 dm = kzalloc(sizeof(*dm), GFP_KERNEL);
234 if (!dm)
235 return -ENOMEM;
236
237 dm->mode = GS_CAN_MODE_RESET;
238
239 rc = usb_control_msg(interface_to_usbdev(intf),
240 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
241 GS_USB_BREQ_MODE,
242 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
243 gsdev->channel,
244 0,
245 dm,
246 sizeof(*dm),
247 1000);
248
249 return rc;
250}
251
252static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
253{
254 struct can_device_stats *can_stats = &dev->can.can_stats;
255
256 if (cf->can_id & CAN_ERR_RESTARTED) {
257 dev->can.state = CAN_STATE_ERROR_ACTIVE;
258 can_stats->restarts++;
259 } else if (cf->can_id & CAN_ERR_BUSOFF) {
260 dev->can.state = CAN_STATE_BUS_OFF;
261 can_stats->bus_off++;
262 } else if (cf->can_id & CAN_ERR_CRTL) {
263 if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
264 (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
265 dev->can.state = CAN_STATE_ERROR_WARNING;
266 can_stats->error_warning++;
267 } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
268 (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
269 dev->can.state = CAN_STATE_ERROR_PASSIVE;
270 can_stats->error_passive++;
271 } else {
272 dev->can.state = CAN_STATE_ERROR_ACTIVE;
273 }
274 }
275}
276
277static void gs_usb_receive_bulk_callback(struct urb *urb)
278{
279 struct gs_usb *usbcan = urb->context;
280 struct gs_can *dev;
281 struct net_device *netdev;
282 int rc;
283 struct net_device_stats *stats;
284 struct gs_host_frame *hf = urb->transfer_buffer;
285 struct gs_tx_context *txc;
286 struct can_frame *cf;
287 struct sk_buff *skb;
288
289 BUG_ON(!usbcan);
290
291 switch (urb->status) {
292 case 0: /* success */
293 break;
294 case -ENOENT:
295 case -ESHUTDOWN:
296 return;
297 default:
298 /* do not resubmit aborted urbs. eg: when device goes down */
299 return;
300 }
301
302 /* device reports out of range channel id */
303 if (hf->channel >= GS_MAX_INTF)
304 goto resubmit_urb;
305
306 dev = usbcan->canch[hf->channel];
307
308 netdev = dev->netdev;
309 stats = &netdev->stats;
310
311 if (!netif_device_present(netdev))
312 return;
313
314 if (hf->echo_id == -1) { /* normal rx */
315 skb = alloc_can_skb(dev->netdev, &cf);
316 if (!skb)
317 return;
318
319 cf->can_id = hf->can_id;
320
321 cf->can_dlc = get_can_dlc(hf->can_dlc);
322 memcpy(cf->data, hf->data, 8);
323
324 /* ERROR frames tell us information about the controller */
325 if (hf->can_id & CAN_ERR_FLAG)
326 gs_update_state(dev, cf);
327
328 netdev->stats.rx_packets++;
329 netdev->stats.rx_bytes += hf->can_dlc;
330
331 netif_rx(skb);
332 } else { /* echo frame for a frame we transmitted earlier */
333 if (hf->echo_id >= GS_MAX_TX_URBS) {
334 netdev_err(netdev,
335 "Unexpected out of range echo id %d\n",
336 hf->echo_id);
337 goto resubmit_urb;
338 }
339
340 netdev->stats.tx_packets++;
341 netdev->stats.tx_bytes += hf->can_dlc;
342
343 txc = gs_get_tx_context(dev, hf->echo_id);
344
345 /* bad devices send bad echo_ids. */
346 if (!txc) {
347 netdev_err(netdev,
348 "Unexpected unused echo id %d\n",
349 hf->echo_id);
350 goto resubmit_urb;
351 }
352
353 can_get_echo_skb(netdev, hf->echo_id);
354
355 gs_free_tx_context(txc);
356
357 netif_wake_queue(netdev);
358 }
359
360 if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
361 skb = alloc_can_err_skb(netdev, &cf);
362 if (!skb)
363 goto resubmit_urb;
364
365 cf->can_id |= CAN_ERR_CRTL;
366 cf->can_dlc = CAN_ERR_DLC;
367 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
368 stats->rx_over_errors++;
369 stats->rx_errors++;
370 netif_rx(skb);
371 }
372
373 resubmit_urb:
374 usb_fill_bulk_urb(urb,
375 usbcan->udev,
376 usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
377 hf,
378 sizeof(struct gs_host_frame),
379 gs_usb_receive_bulk_callback,
380 usbcan
381 );
382
383 rc = usb_submit_urb(urb, GFP_ATOMIC);
384
385 /* On USB failure, take down all interfaces */
386 if (rc == -ENODEV) {
387 for (rc = 0; rc < GS_MAX_INTF; rc++) {
388 if (usbcan->canch[rc])
389 netif_device_detach(usbcan->canch[rc]->netdev);
390 }
391 }
392}
393
394static int gs_usb_set_bittiming(struct net_device *netdev)
395{
396 struct gs_can *dev = netdev_priv(netdev);
397 struct can_bittiming *bt = &dev->can.bittiming;
398 struct usb_interface *intf = dev->iface;
399 int rc;
400 struct gs_device_bittiming *dbt;
401
402 dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
403 if (!dbt)
404 return -ENOMEM;
405
406 dbt->prop_seg = bt->prop_seg;
407 dbt->phase_seg1 = bt->phase_seg1;
408 dbt->phase_seg2 = bt->phase_seg2;
409 dbt->sjw = bt->sjw;
410 dbt->brp = bt->brp;
411
412 /* request bit timings */
413 rc = usb_control_msg(interface_to_usbdev(intf),
414 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
415 GS_USB_BREQ_BITTIMING,
416 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
417 dev->channel,
418 0,
419 dbt,
420 sizeof(*dbt),
421 1000);
422
423 kfree(dbt);
424
425 if (rc < 0)
426 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
427 rc);
428
429 return rc;
430}
431
432static void gs_usb_xmit_callback(struct urb *urb)
433{
434 struct gs_tx_context *txc = urb->context;
435 struct gs_can *dev = txc->dev;
436 struct net_device *netdev = dev->netdev;
437
438 if (urb->status)
439 netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
440
441 usb_free_coherent(urb->dev,
442 urb->transfer_buffer_length,
443 urb->transfer_buffer,
444 urb->transfer_dma);
445
446 atomic_dec(&dev->active_tx_urbs);
447
448 if (!netif_device_present(netdev))
449 return;
450
451 if (netif_queue_stopped(netdev))
452 netif_wake_queue(netdev);
453}
454
455static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
456{
457 struct gs_can *dev = netdev_priv(netdev);
458 struct net_device_stats *stats = &dev->netdev->stats;
459 struct urb *urb;
460 struct gs_host_frame *hf;
461 struct can_frame *cf;
462 int rc;
463 unsigned int idx;
464 struct gs_tx_context *txc;
465
466 if (can_dropped_invalid_skb(netdev, skb))
467 return NETDEV_TX_OK;
468
469 /* find an empty context to keep track of transmission */
470 txc = gs_alloc_tx_context(dev);
471 if (!txc)
472 return NETDEV_TX_BUSY;
473
474 /* create a URB, and a buffer for it */
475 urb = usb_alloc_urb(0, GFP_ATOMIC);
476 if (!urb) {
477 netdev_err(netdev, "No memory left for URB\n");
478 goto nomem_urb;
479 }
480
481 hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
482 &urb->transfer_dma);
483 if (!hf) {
484 netdev_err(netdev, "No memory left for USB buffer\n");
485 goto nomem_hf;
486 }
487
488 idx = txc->echo_id;
489
490 if (idx >= GS_MAX_TX_URBS) {
491 netdev_err(netdev, "Invalid tx context %d\n", idx);
492 goto badidx;
493 }
494
495 hf->echo_id = idx;
496 hf->channel = dev->channel;
497
498 cf = (struct can_frame *)skb->data;
499
500 hf->can_id = cf->can_id;
501 hf->can_dlc = cf->can_dlc;
502 memcpy(hf->data, cf->data, cf->can_dlc);
503
504 usb_fill_bulk_urb(urb, dev->udev,
505 usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
506 hf,
507 sizeof(*hf),
508 gs_usb_xmit_callback,
509 txc);
510
511 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
512 usb_anchor_urb(urb, &dev->tx_submitted);
513
514 can_put_echo_skb(skb, netdev, idx);
515
516 atomic_inc(&dev->active_tx_urbs);
517
518 rc = usb_submit_urb(urb, GFP_ATOMIC);
519 if (unlikely(rc)) { /* usb send failed */
520 atomic_dec(&dev->active_tx_urbs);
521
522 can_free_echo_skb(netdev, idx);
523 gs_free_tx_context(txc);
524
525 usb_unanchor_urb(urb);
526 usb_free_coherent(dev->udev,
527 sizeof(*hf),
528 hf,
529 urb->transfer_dma);
530
531
532 if (rc == -ENODEV) {
533 netif_device_detach(netdev);
534 } else {
535 netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
536 stats->tx_dropped++;
537 }
538 } else {
539 /* Slow down tx path */
540 if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
541 netif_stop_queue(netdev);
542 }
543
544 /* let usb core take care of this urb */
545 usb_free_urb(urb);
546
547 return NETDEV_TX_OK;
548
549 badidx:
550 usb_free_coherent(dev->udev,
551 sizeof(*hf),
552 hf,
553 urb->transfer_dma);
554 nomem_hf:
555 usb_free_urb(urb);
556
557 nomem_urb:
558 gs_free_tx_context(txc);
559 dev_kfree_skb(skb);
560 stats->tx_dropped++;
561 return NETDEV_TX_OK;
562}
563
564static int gs_can_open(struct net_device *netdev)
565{
566 struct gs_can *dev = netdev_priv(netdev);
567 struct gs_usb *parent = dev->parent;
568 int rc, i;
569 struct gs_device_mode *dm;
570 u32 ctrlmode;
571
572 rc = open_candev(netdev);
573 if (rc)
574 return rc;
575
576 if (atomic_add_return(1, &parent->active_channels) == 1) {
577 for (i = 0; i < GS_MAX_RX_URBS; i++) {
578 struct urb *urb;
579 u8 *buf;
580
581 /* alloc rx urb */
582 urb = usb_alloc_urb(0, GFP_KERNEL);
583 if (!urb) {
584 netdev_err(netdev,
585 "No memory left for URB\n");
586 return -ENOMEM;
587 }
588
589 /* alloc rx buffer */
590 buf = usb_alloc_coherent(dev->udev,
591 sizeof(struct gs_host_frame),
592 GFP_KERNEL,
593 &urb->transfer_dma);
594 if (!buf) {
595 netdev_err(netdev,
596 "No memory left for USB buffer\n");
597 usb_free_urb(urb);
598 return -ENOMEM;
599 }
600
601 /* fill, anchor, and submit rx urb */
602 usb_fill_bulk_urb(urb,
603 dev->udev,
604 usb_rcvbulkpipe(dev->udev,
605 GSUSB_ENDPOINT_IN),
606 buf,
607 sizeof(struct gs_host_frame),
608 gs_usb_receive_bulk_callback,
609 parent);
610 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
611
612 usb_anchor_urb(urb, &parent->rx_submitted);
613
614 rc = usb_submit_urb(urb, GFP_KERNEL);
615 if (rc) {
616 if (rc == -ENODEV)
617 netif_device_detach(dev->netdev);
618
619 netdev_err(netdev,
620 "usb_submit failed (err=%d)\n",
621 rc);
622
623 usb_unanchor_urb(urb);
624 break;
625 }
626
627 /* Drop reference,
628 * USB core will take care of freeing it
629 */
630 usb_free_urb(urb);
631 }
632 }
633
634 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
635 if (!dm)
636 return -ENOMEM;
637
638 /* flags */
639 ctrlmode = dev->can.ctrlmode;
640 dm->flags = 0;
641
642 if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
643 dm->flags |= GS_CAN_MODE_LOOP_BACK;
644 else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
645 dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
646
647 /* The controller is not allowed to retry TX;
648 * this mode is unavailable on Atmel's UC3C hardware.
649 */
650 if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
651 dm->flags |= GS_CAN_MODE_ONE_SHOT;
652
653 if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
654 dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
655
656 /* finally start device */
657 dm->mode = GS_CAN_MODE_START;
658 rc = usb_control_msg(interface_to_usbdev(dev->iface),
659 usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
660 GS_USB_BREQ_MODE,
661 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
662 dev->channel,
663 0,
664 dm,
665 sizeof(*dm),
666 1000);
667
668 if (rc < 0) {
669 netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
670 kfree(dm);
671 return rc;
672 }
673
674 kfree(dm);
675
676 dev->can.state = CAN_STATE_ERROR_ACTIVE;
677
678 if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
679 netif_start_queue(netdev);
680
681 return 0;
682}
683
684static int gs_can_close(struct net_device *netdev)
685{
686 int rc;
687 struct gs_can *dev = netdev_priv(netdev);
688 struct gs_usb *parent = dev->parent;
689
690 netif_stop_queue(netdev);
691
692 /* Stop polling */
693 if (atomic_dec_and_test(&parent->active_channels))
694 usb_kill_anchored_urbs(&parent->rx_submitted);
695
696 /* Stop sending URBs */
697 usb_kill_anchored_urbs(&dev->tx_submitted);
698 atomic_set(&dev->active_tx_urbs, 0);
699
700 /* reset the device */
701 rc = gs_cmd_reset(parent, dev);
702 if (rc < 0)
703 netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
704
705 /* reset tx contexts */
706 for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
707 dev->tx_context[rc].dev = dev;
708 dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
709 }
710
711 /* close the netdev */
712 close_candev(netdev);
713
714 return 0;
715}
716
717static const struct net_device_ops gs_usb_netdev_ops = {
718 .ndo_open = gs_can_open,
719 .ndo_stop = gs_can_close,
720 .ndo_start_xmit = gs_can_start_xmit,
721};
722
723static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
724{
725 struct gs_can *dev;
726 struct net_device *netdev;
727 int rc;
728 struct gs_device_bt_const *bt_const;
729
730 bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
731 if (!bt_const)
732 return ERR_PTR(-ENOMEM);
733
734 /* fetch bit timing constants */
735 rc = usb_control_msg(interface_to_usbdev(intf),
736 usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
737 GS_USB_BREQ_BT_CONST,
738 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
739 channel,
740 0,
741 bt_const,
742 sizeof(*bt_const),
743 1000);
744
745 if (rc < 0) {
746 dev_err(&intf->dev,
747 "Couldn't get bit timing const for channel (err=%d)\n",
748 rc);
749 kfree(bt_const);
750 return ERR_PTR(rc);
751 }
752
753 /* create netdev */
754 netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
755 if (!netdev) {
756 dev_err(&intf->dev, "Couldn't allocate candev\n");
757 kfree(bt_const);
758 return ERR_PTR(-ENOMEM);
759 }
760
761 dev = netdev_priv(netdev);
762
763 netdev->netdev_ops = &gs_usb_netdev_ops;
764
765 netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
766
767 /* dev setup */
768 strcpy(dev->bt_const.name, "gs_usb");
769 dev->bt_const.tseg1_min = bt_const->tseg1_min;
770 dev->bt_const.tseg1_max = bt_const->tseg1_max;
771 dev->bt_const.tseg2_min = bt_const->tseg2_min;
772 dev->bt_const.tseg2_max = bt_const->tseg2_max;
773 dev->bt_const.sjw_max = bt_const->sjw_max;
774 dev->bt_const.brp_min = bt_const->brp_min;
775 dev->bt_const.brp_max = bt_const->brp_max;
776 dev->bt_const.brp_inc = bt_const->brp_inc;
777
778 dev->udev = interface_to_usbdev(intf);
779 dev->iface = intf;
780 dev->netdev = netdev;
781 dev->channel = channel;
782
783 init_usb_anchor(&dev->tx_submitted);
784 atomic_set(&dev->active_tx_urbs, 0);
785 spin_lock_init(&dev->tx_ctx_lock);
786 for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
787 dev->tx_context[rc].dev = dev;
788 dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
789 }
790
791 /* can setup */
792 dev->can.state = CAN_STATE_STOPPED;
793 dev->can.clock.freq = bt_const->fclk_can;
794 dev->can.bittiming_const = &dev->bt_const;
795 dev->can.do_set_bittiming = gs_usb_set_bittiming;
796
797 dev->can.ctrlmode_supported = 0;
798
799 if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
800 dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
801
802 if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
803 dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
804
805 if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
806 dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
807
808 if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
809 dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
810
811 kfree(bt_const);
812
813 SET_NETDEV_DEV(netdev, &intf->dev);
814
815 rc = register_candev(dev->netdev);
816 if (rc) {
817 free_candev(dev->netdev);
818 dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
819 return ERR_PTR(rc);
820 }
821
822 return dev;
823}
824
825static void gs_destroy_candev(struct gs_can *dev)
826{
827 unregister_candev(dev->netdev);
828 free_candev(dev->netdev);
829 usb_kill_anchored_urbs(&dev->tx_submitted);
830 kfree(dev);
831}
832
833static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
834{
835 struct gs_usb *dev;
836 int rc = -ENOMEM;
837 unsigned int icount, i;
838 struct gs_host_config *hconf;
839 struct gs_device_config *dconf;
840
841 hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
842 if (!hconf)
843 return -ENOMEM;
844
845 hconf->byte_order = 0x0000beef;
846
847 /* send host config */
848 rc = usb_control_msg(interface_to_usbdev(intf),
849 usb_sndctrlpipe(interface_to_usbdev(intf), 0),
850 GS_USB_BREQ_HOST_FORMAT,
851 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
852 1,
853 intf->altsetting[0].desc.bInterfaceNumber,
854 hconf,
855 sizeof(*hconf),
856 1000);
857
858 kfree(hconf);
859
860 if (rc < 0) {
861 dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
862 rc);
863 return rc;
864 }
865
866 dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
867 if (!dconf)
868 return -ENOMEM;
869
870 /* read device config */
871 rc = usb_control_msg(interface_to_usbdev(intf),
872 usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
873 GS_USB_BREQ_DEVICE_CONFIG,
874 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
875 1,
876 intf->altsetting[0].desc.bInterfaceNumber,
877 dconf,
878 sizeof(*dconf),
879 1000);
880 if (rc < 0) {
881 dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
882 rc);
883
884 kfree(dconf);
885
886 return rc;
887 }
888
889 icount = dconf->icount+1;
890
891 kfree(dconf);
892
893 dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
894
895 if (icount > GS_MAX_INTF) {
896 dev_err(&intf->dev,
897 "Driver cannot handle more that %d CAN interfaces\n",
898 GS_MAX_INTF);
899 return -EINVAL;
900 }
901
902 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 if (!dev)
 return -ENOMEM;
903 init_usb_anchor(&dev->rx_submitted);
904
905 atomic_set(&dev->active_channels, 0);
906
907 usb_set_intfdata(intf, dev);
908 dev->udev = interface_to_usbdev(intf);
909
910 for (i = 0; i < icount; i++) {
911 dev->canch[i] = gs_make_candev(i, intf);
912 if (IS_ERR_OR_NULL(dev->canch[i])) {
 rc = dev->canch[i] ? PTR_ERR(dev->canch[i]) : -ENOMEM;
913 /* on failure destroy previously created candevs */
914 icount = i;
915 for (i = 0; i < icount; i++) {
916 gs_destroy_candev(dev->canch[i]);
917 dev->canch[i] = NULL;
918 }
919 kfree(dev);
920 return rc;
921 }
922 dev->canch[i]->parent = dev;
923 }
924
925 return 0;
926}
927
928static void gs_usb_disconnect(struct usb_interface *intf)
929{
930 unsigned i;
931 struct gs_usb *dev = usb_get_intfdata(intf);
932 usb_set_intfdata(intf, NULL);
933
934 if (!dev) {
935 dev_err(&intf->dev, "Disconnect (nodata)\n");
936 return;
937 }
938
939 for (i = 0; i < GS_MAX_INTF; i++) {
940 struct gs_can *can = dev->canch[i];
941
942 if (!can)
943 continue;
944
945 gs_destroy_candev(can);
946 }
947
948 usb_kill_anchored_urbs(&dev->rx_submitted);
949}
950
951static const struct usb_device_id gs_usb_table[] = {
952 {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
953 {} /* Terminating entry */
954};
955
956MODULE_DEVICE_TABLE(usb, gs_usb_table);
957
958static struct usb_driver gs_usb_driver = {
959 .name = "gs_usb",
960 .probe = gs_usb_probe,
961 .disconnect = gs_usb_disconnect,
962 .id_table = gs_usb_table,
963};
964
965module_usb_driver(gs_usb_driver);
966
967MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
968MODULE_DESCRIPTION(
969"Socket CAN device driver for Geschwister Schneider Technologie-, "
970"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
971MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4ca46edc061d..541fb7a05625 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -53,6 +53,8 @@
53#define USB_OEM_MERCURY_PRODUCT_ID 34 53#define USB_OEM_MERCURY_PRODUCT_ID 34
54#define USB_OEM_LEAF_PRODUCT_ID 35 54#define USB_OEM_LEAF_PRODUCT_ID 35
55#define USB_CAN_R_PRODUCT_ID 39 55#define USB_CAN_R_PRODUCT_ID 39
56#define USB_LEAF_LITE_V2_PRODUCT_ID 288
57#define USB_MINI_PCIE_HS_PRODUCT_ID 289
56 58
57/* USB devices features */ 59/* USB devices features */
58#define KVASER_HAS_SILENT_MODE BIT(0) 60#define KVASER_HAS_SILENT_MODE BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
356 .driver_info = KVASER_HAS_TXRX_ERRORS }, 358 .driver_info = KVASER_HAS_TXRX_ERRORS },
357 { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), 359 { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
358 .driver_info = KVASER_HAS_TXRX_ERRORS }, 360 .driver_info = KVASER_HAS_TXRX_ERRORS },
361 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
362 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
359 { } 363 { }
360}; 364};
361MODULE_DEVICE_TABLE(usb, kvaser_usb_table); 365MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
379 void *buf; 383 void *buf;
380 int actual_len; 384 int actual_len;
381 int err; 385 int err;
382 int pos = 0; 386 int pos;
387 unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
383 388
384 buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL); 389 buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
385 if (!buf) 390 if (!buf)
386 return -ENOMEM; 391 return -ENOMEM;
387 392
388 err = usb_bulk_msg(dev->udev, 393 do {
389 usb_rcvbulkpipe(dev->udev, 394 err = usb_bulk_msg(dev->udev,
390 dev->bulk_in->bEndpointAddress), 395 usb_rcvbulkpipe(dev->udev,
391 buf, RX_BUFFER_SIZE, &actual_len, 396 dev->bulk_in->bEndpointAddress),
392 USB_RECV_TIMEOUT); 397 buf, RX_BUFFER_SIZE, &actual_len,
393 if (err < 0) 398 USB_RECV_TIMEOUT);
394 goto end; 399 if (err < 0)
400 goto end;
395 401
396 while (pos <= actual_len - MSG_HEADER_LEN) { 402 pos = 0;
397 tmp = buf + pos; 403 while (pos <= actual_len - MSG_HEADER_LEN) {
404 tmp = buf + pos;
398 405
399 if (!tmp->len) 406 if (!tmp->len)
400 break; 407 break;
401 408
402 if (pos + tmp->len > actual_len) { 409 if (pos + tmp->len > actual_len) {
403 dev_err(dev->udev->dev.parent, "Format error\n"); 410 dev_err(dev->udev->dev.parent,
404 break; 411 "Format error\n");
405 } 412 break;
413 }
406 414
407 if (tmp->id == id) { 415 if (tmp->id == id) {
408 memcpy(msg, tmp, tmp->len); 416 memcpy(msg, tmp, tmp->len);
409 goto end; 417 goto end;
410 } 418 }
411 419
412 pos += tmp->len; 420 pos += tmp->len;
413 } 421 }
422 } while (time_before(jiffies, to));
414 423
415 err = -EINVAL; 424 err = -EINVAL;
416 425
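
The rework above turns a single bounded read into a deadline loop: keep draining bulk messages until the wanted id arrives or roughly USB_RECV_TIMEOUT elapses overall. The underlying jiffies pattern, as a sketch (try_receive_once() is an illustrative placeholder):

#include <linux/jiffies.h>
#include <linux/errno.h>

extern int try_receive_once(void); /* illustrative: returns 0 when found */

static int example_wait(unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (!try_receive_once())
			return 0;
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}
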
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
new file mode 100644
index 000000000000..5e8b5609c067
--- /dev/null
+++ b/drivers/net/can/xilinx_can.c
@@ -0,0 +1,1208 @@
1/* Xilinx CAN device driver
2 *
3 * Copyright (C) 2012 - 2014 Xilinx, Inc.
4 * Copyright (C) 2009 PetaLogix. All rights reserved.
5 *
6 * Description:
7 * This driver is developed for the Axi CAN IP and the Zynq CANPS controller.
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation, either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/clk.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/netdevice.h>
27#include <linux/of.h>
28#include <linux/platform_device.h>
29#include <linux/skbuff.h>
30#include <linux/string.h>
31#include <linux/types.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/can/led.h>
35
36#define DRIVER_NAME "xilinx_can"
37
38/* CAN registers set */
39enum xcan_reg {
40 XCAN_SRR_OFFSET = 0x00, /* Software reset */
41 XCAN_MSR_OFFSET = 0x04, /* Mode select */
42 XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
43 XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
44 XCAN_ECR_OFFSET = 0x10, /* Error counter */
45 XCAN_ESR_OFFSET = 0x14, /* Error status */
46 XCAN_SR_OFFSET = 0x18, /* Status */
47 XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
48 XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
49 XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
50 XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */
51 XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
52 XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
53 XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
54 XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
55 XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
56 XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
57 XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
58};
59
60/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
61#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
62#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
63#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
64#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
65#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
66#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
67#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
68#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
69#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
70#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
71#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
72#define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
73#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
74#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
75#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
76#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
77#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
78#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
79#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
80#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
81#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
82#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
83#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
84#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
85#define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
86#define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
87#define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
88#define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
89#define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
90#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
91#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
92#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
93#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
94#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
95#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
96#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
97#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
98#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
99
100#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
101 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
102 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
103 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
104
105/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
106#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
107#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
108#define XCAN_IDR_ID1_SHIFT 21 /* Standard Message Identifier */
109#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
110#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
111#define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
112
113/* CAN frame length constants */
114#define XCAN_FRAME_MAX_DATA_LEN 8
115#define XCAN_TIMEOUT (1 * HZ)
116
117/**
118 * struct xcan_priv - This definition defines a CAN driver instance
119 * @can: CAN private data structure.
120 * @tx_head: Tx CAN packets ready to send on the queue
121 * @tx_tail: Tx CAN packets successfully sent on the queue
122 * @tx_max: Maximum number of packets the driver can send
123 * @napi: NAPI structure
124 * @read_reg: For reading data from CAN registers
125 * @write_reg: For writing data to CAN registers
126 * @dev: Network device data structure
127 * @reg_base: Ioremapped address to registers
128 * @irq_flags: For request_irq()
129 * @bus_clk: Pointer to struct clk
130 * @can_clk: Pointer to struct clk
131 */
132struct xcan_priv {
133 struct can_priv can;
134 unsigned int tx_head;
135 unsigned int tx_tail;
136 unsigned int tx_max;
137 struct napi_struct napi;
138 u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
139 void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
140 u32 val);
141 struct net_device *dev;
142 void __iomem *reg_base;
143 unsigned long irq_flags;
144 struct clk *bus_clk;
145 struct clk *can_clk;
146};
147
148/* CAN Bittiming constants as per Xilinx CAN specs */
149static const struct can_bittiming_const xcan_bittiming_const = {
150 .name = DRIVER_NAME,
151 .tseg1_min = 1,
152 .tseg1_max = 16,
153 .tseg2_min = 1,
154 .tseg2_max = 8,
155 .sjw_max = 4,
156 .brp_min = 1,
157 .brp_max = 256,
158 .brp_inc = 1,
159};
160
161/**
162 * xcan_write_reg_le - Write a value to the device register little endian
163 * @priv: Driver private data structure
164 * @reg: Register offset
165 * @val: Value to write at the Register offset
166 *
167 * Write data to the particular CAN register
168 */
169static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
170 u32 val)
171{
172 iowrite32(val, priv->reg_base + reg);
173}
174
175/**
176 * xcan_read_reg_le - Read a value from the device register little endian
177 * @priv: Driver private data structure
178 * @reg: Register offset
179 *
180 * Read data from the particular CAN register
181 * Return: value read from the CAN register
182 */
183static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
184{
185 return ioread32(priv->reg_base + reg);
186}
187
188/**
189 * xcan_write_reg_be - Write a value to the device register big endian
190 * @priv: Driver private data structure
191 * @reg: Register offset
192 * @val: Value to write at the Register offset
193 *
194 * Write data to the particular CAN register
195 */
196static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
197 u32 val)
198{
199 iowrite32be(val, priv->reg_base + reg);
200}
201
202/**
203 * xcan_read_reg_be - Read a value from the device register big endian
204 * @priv: Driver private data structure
205 * @reg: Register offset
206 *
207 * Read data from the particular CAN register
208 * Return: value read from the CAN register
209 */
210static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
211{
212 return ioread32be(priv->reg_base + reg);
213}
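
Keeping both little- and big-endian accessor pairs behind the read_reg/write_reg function pointers lets one driver binary serve either bus attachment. The probe code that picks the pair is outside this excerpt; one plausible selection scheme, sketched under that assumption, is to reset the core with little-endian accessors and check whether the status register reads back sensibly:

/* Illustrative only: assume little endian, verify, else fall back. */
static void example_pick_accessors(struct xcan_priv *priv)
{
	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	xcan_write_reg_le(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	if (!(xcan_read_reg_le(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
		/* reset again, now with the accessors that took effect */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	}
}
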
214
215/**
216 * set_reset_mode - Resets the CAN device mode
217 * @ndev: Pointer to net_device structure
218 *
219 * This is the driver reset mode routine. The driver
220 * enters configuration mode.
221 *
222 * Return: 0 on success and failure value on error
223 */
224static int set_reset_mode(struct net_device *ndev)
225{
226 struct xcan_priv *priv = netdev_priv(ndev);
227 unsigned long timeout;
228
229 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
230
231 timeout = jiffies + XCAN_TIMEOUT;
232 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
233 if (time_after(jiffies, timeout)) {
234 netdev_warn(ndev, "timed out waiting for config mode\n");
235 return -ETIMEDOUT;
236 }
237 usleep_range(500, 10000);
238 }
239
240 return 0;
241}
242
243/**
244 * xcan_set_bittiming - CAN set bit timing routine
245 * @ndev: Pointer to net_device structure
246 *
247 * This is the driver set bittiming routine.
248 * Return: 0 on success and failure value on error
249 */
250static int xcan_set_bittiming(struct net_device *ndev)
251{
252 struct xcan_priv *priv = netdev_priv(ndev);
253 struct can_bittiming *bt = &priv->can.bittiming;
254 u32 btr0, btr1;
255 u32 is_config_mode;
256
257 /* Check whether Xilinx CAN is in configuration mode.
258 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
259 */
260 is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
261 XCAN_SR_CONFIG_MASK;
262 if (!is_config_mode) {
263 netdev_alert(ndev,
264 "BUG! Cannot set bittiming - CAN is not in config mode\n");
265 return -EPERM;
266 }
267
268 /* Setting Baud Rate prescaler value in BRPR Register */
269 btr0 = (bt->brp - 1);
270
271 /* Setting Time Segment 1 in BTR Register */
272 btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
273
274 /* Setting Time Segment 2 in BTR Register */
275 btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
276
277 /* Setting Synchronous jump width in BTR Register */
278 btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
279
280 priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
281 priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
282
283 netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
284 priv->read_reg(priv, XCAN_BRPR_OFFSET),
285 priv->read_reg(priv, XCAN_BTR_OFFSET));
286
287 return 0;
288}
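/* Worked packing illustration (hypothetical timing values, not from this
 * driver): for brp = 5, prop_seg = 3, phase_seg1 = 3, phase_seg2 = 2 and
 * sjw = 1, the code above computes, using the shift macros defined earlier
 * in this file:
 *
 *	btr0 = 5 - 1;                              BRPR = 4
 *	btr1 = (3 + 3 - 1)                         TS1 field = 5
 *	     | ((2 - 1) << XCAN_BTR_TS2_SHIFT)     TS2 field = 1
 *	     | ((1 - 1) << XCAN_BTR_SJW_SHIFT);    SJW field = 0
 */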
289
290/**
291 * xcan_chip_start - This is the driver's start routine
292 * @ndev:	Pointer to net_device structure
293 *
294 * This is the driver's start routine.
295 * Based on the state of the CAN device, it puts
296 * the device into the proper mode.
297 *
298 * Return: 0 on success and failure value on error
299 */
300static int xcan_chip_start(struct net_device *ndev)
301{
302 struct xcan_priv *priv = netdev_priv(ndev);
303	int err; u32 reg_msr, reg_sr_mask;
304 unsigned long timeout;
305
306 /* Check if it is in reset mode */
307 err = set_reset_mode(ndev);
308 if (err < 0)
309 return err;
310
311 err = xcan_set_bittiming(ndev);
312 if (err < 0)
313 return err;
314
315 /* Enable interrupts */
316 priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
317
318 /* Check whether it is loopback mode or normal mode */
319 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
320 reg_msr = XCAN_MSR_LBACK_MASK;
321 reg_sr_mask = XCAN_SR_LBACK_MASK;
322 } else {
323 reg_msr = 0x0;
324 reg_sr_mask = XCAN_SR_NORMAL_MASK;
325 }
326
327 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
328 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
329
330 timeout = jiffies + XCAN_TIMEOUT;
331 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
332 if (time_after(jiffies, timeout)) {
333 netdev_warn(ndev,
334				"timed out waiting for correct mode\n");
335 return -ETIMEDOUT;
336 }
337 }
338	netdev_dbg(ndev, "status:0x%08x\n",
339 priv->read_reg(priv, XCAN_SR_OFFSET));
340
341 priv->can.state = CAN_STATE_ERROR_ACTIVE;
342 return 0;
343}
344
345/**
346 * xcan_do_set_mode - This sets the mode of the driver
347 * @ndev: Pointer to net_device structure
348 * @mode: The mode to be set
349 *
350 * This checks the driver state and calls the
351 * routine corresponding to the requested mode.
352 *
353 * Return: 0 on success and failure value on error
354 */
355static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
356{
357 int ret;
358
359 switch (mode) {
360 case CAN_MODE_START:
361 ret = xcan_chip_start(ndev);
362 if (ret < 0) {
363 netdev_err(ndev, "xcan_chip_start failed!\n");
364 return ret;
365 }
366 netif_wake_queue(ndev);
367 break;
368 default:
369 ret = -EOPNOTSUPP;
370 break;
371 }
372
373 return ret;
374}
375
376/**
377 * xcan_start_xmit - Starts the transmission
378 * @skb: sk_buff pointer that contains data to be Txed
379 * @ndev: Pointer to net_device structure
380 *
381 * This function is invoked from upper layers to initiate transmission. This
382 * function uses the next available free TX buffer and populates its fields to
383 * start the transmission.
384 *
385 * Return: 0 on success and failure value on error
386 */
387static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
388{
389 struct xcan_priv *priv = netdev_priv(ndev);
390 struct net_device_stats *stats = &ndev->stats;
391 struct can_frame *cf = (struct can_frame *)skb->data;
392 u32 id, dlc, data[2] = {0, 0};
393
394 if (can_dropped_invalid_skb(ndev, skb))
395 return NETDEV_TX_OK;
396
397 /* Check if the TX buffer is full */
398 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
399 XCAN_SR_TXFLL_MASK)) {
400 netif_stop_queue(ndev);
401		netdev_err(ndev, "BUG! TX FIFO full when queue awake!\n");
402 return NETDEV_TX_BUSY;
403 }
404
405	/* Convert the socketCAN ID to the hardware ID register bit layout */
406 if (cf->can_id & CAN_EFF_FLAG) {
407 /* Extended CAN ID format */
408 id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
409 XCAN_IDR_ID2_MASK;
410 id |= (((cf->can_id & CAN_EFF_MASK) >>
411 (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
412 XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
413
414		/* The substitute remote TX request bit should be "1"
415 * for extended frames as in the Xilinx CAN datasheet
416 */
417 id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
418
419 if (cf->can_id & CAN_RTR_FLAG)
420 /* Extended frames remote TX request */
421 id |= XCAN_IDR_RTR_MASK;
422 } else {
423 /* Standard CAN ID format */
424 id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
425 XCAN_IDR_ID1_MASK;
426
427 if (cf->can_id & CAN_RTR_FLAG)
428 /* Standard frames remote TX request */
429 id |= XCAN_IDR_SRR_MASK;
430 }
431
432 dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
433
434 if (cf->can_dlc > 0)
435 data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
436 if (cf->can_dlc > 4)
437 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
438
439 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
440 priv->tx_head++;
441
442 /* Write the Frame to Xilinx CAN TX FIFO */
443 priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
444	/* If the CAN frame is an RTR frame, this write triggers transmission */
445 priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
446 if (!(cf->can_id & CAN_RTR_FLAG)) {
447 priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
448		/* If the CAN frame is a standard/extended frame, this
449		 * write triggers transmission
450 */
451 priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
452 stats->tx_bytes += cf->can_dlc;
453 }
454
455 /* Check if the TX buffer is full */
456 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
457 netif_stop_queue(ndev);
458
459 return NETDEV_TX_OK;
460}
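/* A minimal sketch of the userspace side that exercises this transmit path,
 * using the standard SocketCAN raw API; it is not part of this driver, and
 * the interface name "can0" and the ID/payload below are assumptions.
 */
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int send_one_frame(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;
	strcpy(ifr.ifr_name, "can0");		/* assumed interface name */
	ioctl(s, SIOCGIFINDEX, &ifr);		/* resolve interface index */
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));
	write(s, &frame, sizeof(frame));	/* one frame per write() */
	return close(s);
}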
461
462/**
463 * xcan_rx -  Called from the CAN ISR to complete the received
464 * frame processing
465 * @ndev: Pointer to net_device structure
466 *
467 * This function is invoked from the CAN ISR (NAPI poll) to process RX frames. It
468 * does minimal processing and invokes "netif_receive_skb" to complete further
469 * processing.
470 * Return: 1 on success and 0 on failure.
471 */
472static int xcan_rx(struct net_device *ndev)
473{
474 struct xcan_priv *priv = netdev_priv(ndev);
475 struct net_device_stats *stats = &ndev->stats;
476 struct can_frame *cf;
477 struct sk_buff *skb;
478 u32 id_xcan, dlc, data[2] = {0, 0};
479
480 skb = alloc_can_skb(ndev, &cf);
481 if (unlikely(!skb)) {
482 stats->rx_dropped++;
483 return 0;
484 }
485
486	/* Read a frame from the Xilinx Zynq CAN controller */
487 id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
488 dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
489 XCAN_DLCR_DLC_SHIFT;
490
491 /* Change Xilinx CAN data length format to socketCAN data format */
492 cf->can_dlc = get_can_dlc(dlc);
493
494 /* Change Xilinx CAN ID format to socketCAN ID format */
495 if (id_xcan & XCAN_IDR_IDE_MASK) {
496 /* The received frame is an Extended format frame */
497 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
498 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
499 XCAN_IDR_ID2_SHIFT;
500 cf->can_id |= CAN_EFF_FLAG;
501 if (id_xcan & XCAN_IDR_RTR_MASK)
502 cf->can_id |= CAN_RTR_FLAG;
503 } else {
504 /* The received frame is a standard format frame */
505 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
506 XCAN_IDR_ID1_SHIFT;
507 if (id_xcan & XCAN_IDR_SRR_MASK)
508 cf->can_id |= CAN_RTR_FLAG;
509 }
510
511 if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
512 data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
513 data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
514
515 /* Change Xilinx CAN data format to socketCAN data format */
516 if (cf->can_dlc > 0)
517 *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
518 if (cf->can_dlc > 4)
519 *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
520 }
521
522 stats->rx_bytes += cf->can_dlc;
523 stats->rx_packets++;
524 netif_receive_skb(skb);
525
526 return 1;
527}
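/* The matching userspace receive side is a blocking read() of one frame on
 * the raw socket from the sketch above (standard SocketCAN, not part of
 * this driver); handle_frame() is a hypothetical helper:
 *
 *	struct can_frame rx;
 *
 *	if (read(s, &rx, sizeof(rx)) == sizeof(rx))
 *		handle_frame(rx.can_id, rx.can_dlc, rx.data);
 */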
528
529/**
530 * xcan_err_interrupt - error frame Isr
531 * @ndev: net_device pointer
532 * @isr: interrupt status register value
533 *
534 * This is the CAN error interrupt; it checks
535 * the type of error and forwards the error
536 * frame to upper layers.
537 */
538static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
539{
540 struct xcan_priv *priv = netdev_priv(ndev);
541 struct net_device_stats *stats = &ndev->stats;
542 struct can_frame *cf;
543 struct sk_buff *skb;
544 u32 err_status, status, txerr = 0, rxerr = 0;
545
546 skb = alloc_can_err_skb(ndev, &cf);
547
548 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
549 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
550 txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
551 rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
552 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
553 status = priv->read_reg(priv, XCAN_SR_OFFSET);
554
555 if (isr & XCAN_IXR_BSOFF_MASK) {
556 priv->can.state = CAN_STATE_BUS_OFF;
557 priv->can.can_stats.bus_off++;
558 /* Leave device in Config Mode in bus-off state */
559 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
560 can_bus_off(ndev);
561 if (skb)
562 cf->can_id |= CAN_ERR_BUSOFF;
563 } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
564 priv->can.state = CAN_STATE_ERROR_PASSIVE;
565 priv->can.can_stats.error_passive++;
566 if (skb) {
567 cf->can_id |= CAN_ERR_CRTL;
568 cf->data[1] = (rxerr > 127) ?
569 CAN_ERR_CRTL_RX_PASSIVE :
570 CAN_ERR_CRTL_TX_PASSIVE;
571 cf->data[6] = txerr;
572 cf->data[7] = rxerr;
573 }
574 } else if (status & XCAN_SR_ERRWRN_MASK) {
575 priv->can.state = CAN_STATE_ERROR_WARNING;
576 priv->can.can_stats.error_warning++;
577 if (skb) {
578 cf->can_id |= CAN_ERR_CRTL;
579 cf->data[1] |= (txerr > rxerr) ?
580 CAN_ERR_CRTL_TX_WARNING :
581 CAN_ERR_CRTL_RX_WARNING;
582 cf->data[6] = txerr;
583 cf->data[7] = rxerr;
584 }
585 }
586
587 /* Check for Arbitration lost interrupt */
588 if (isr & XCAN_IXR_ARBLST_MASK) {
589 priv->can.can_stats.arbitration_lost++;
590 if (skb) {
591 cf->can_id |= CAN_ERR_LOSTARB;
592 cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
593 }
594 }
595
596 /* Check for RX FIFO Overflow interrupt */
597 if (isr & XCAN_IXR_RXOFLW_MASK) {
598 stats->rx_over_errors++;
599 stats->rx_errors++;
600 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
601 if (skb) {
602 cf->can_id |= CAN_ERR_CRTL;
603 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
604 }
605 }
606
607 /* Check for error interrupt */
608 if (isr & XCAN_IXR_ERROR_MASK) {
609 if (skb) {
610 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
611 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
612 }
613
614 /* Check for Ack error interrupt */
615 if (err_status & XCAN_ESR_ACKER_MASK) {
616 stats->tx_errors++;
617 if (skb) {
618 cf->can_id |= CAN_ERR_ACK;
619 cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
620 }
621 }
622
623 /* Check for Bit error interrupt */
624 if (err_status & XCAN_ESR_BERR_MASK) {
625 stats->tx_errors++;
626 if (skb) {
627 cf->can_id |= CAN_ERR_PROT;
628 cf->data[2] = CAN_ERR_PROT_BIT;
629 }
630 }
631
632 /* Check for Stuff error interrupt */
633 if (err_status & XCAN_ESR_STER_MASK) {
634 stats->rx_errors++;
635 if (skb) {
636 cf->can_id |= CAN_ERR_PROT;
637 cf->data[2] = CAN_ERR_PROT_STUFF;
638 }
639 }
640
641 /* Check for Form error interrupt */
642 if (err_status & XCAN_ESR_FMER_MASK) {
643 stats->rx_errors++;
644 if (skb) {
645 cf->can_id |= CAN_ERR_PROT;
646 cf->data[2] = CAN_ERR_PROT_FORM;
647 }
648 }
649
650 /* Check for CRC error interrupt */
651 if (err_status & XCAN_ESR_CRCER_MASK) {
652 stats->rx_errors++;
653 if (skb) {
654 cf->can_id |= CAN_ERR_PROT;
655 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
656 CAN_ERR_PROT_LOC_CRC_DEL;
657 }
658 }
659 priv->can.can_stats.bus_error++;
660 }
661
662 if (skb) {
663 stats->rx_packets++;
664 stats->rx_bytes += cf->can_dlc;
665 netif_rx(skb);
666 }
667
668 netdev_dbg(ndev, "%s: error status register:0x%x\n",
669 __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
670}
671
672/**
673 * xcan_state_interrupt - Check the state of the CAN device
674 * @ndev: net_device pointer
675 * @isr: interrupt status register value
676 *
677 * This checks the state of the CAN device
678 * and puts the device into the appropriate state.
679 */
680static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
681{
682 struct xcan_priv *priv = netdev_priv(ndev);
683
684	/* Check for Sleep interrupt; if set, put CAN device in sleep state */
685 if (isr & XCAN_IXR_SLP_MASK)
686 priv->can.state = CAN_STATE_SLEEPING;
687
688	/* Check for Wake up interrupt; if set, put CAN device in active state */
689 if (isr & XCAN_IXR_WKUP_MASK)
690 priv->can.state = CAN_STATE_ERROR_ACTIVE;
691}
692
693/**
694 * xcan_rx_poll - Poll routine for rx packets (NAPI)
695 * @napi: napi structure pointer
696 * @quota: Max number of rx packets to be processed.
697 *
698 * This is the RX poll routine.
699 * It processes packets up to the maximum quota value.
700 *
701 * Return: number of packets received
702 */
703static int xcan_rx_poll(struct napi_struct *napi, int quota)
704{
705 struct net_device *ndev = napi->dev;
706 struct xcan_priv *priv = netdev_priv(ndev);
707 u32 isr, ier;
708 int work_done = 0;
709
710 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
711 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
712 if (isr & XCAN_IXR_RXOK_MASK) {
713 priv->write_reg(priv, XCAN_ICR_OFFSET,
714 XCAN_IXR_RXOK_MASK);
715 work_done += xcan_rx(ndev);
716 } else {
717 priv->write_reg(priv, XCAN_ICR_OFFSET,
718 XCAN_IXR_RXNEMP_MASK);
719 break;
720 }
721 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
722 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
723 }
724
725 if (work_done)
726 can_led_event(ndev, CAN_LED_EVENT_RX);
727
728 if (work_done < quota) {
729 napi_complete(napi);
730 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
731 ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
732 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
733 }
734 return work_done;
735}
736
737/**
738 * xcan_tx_interrupt - Tx Done Isr
739 * @ndev: net_device pointer
740 * @isr: Interrupt status register value
741 */
742static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
743{
744 struct xcan_priv *priv = netdev_priv(ndev);
745 struct net_device_stats *stats = &ndev->stats;
746
747 while ((priv->tx_head - priv->tx_tail > 0) &&
748 (isr & XCAN_IXR_TXOK_MASK)) {
749 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
750 can_get_echo_skb(ndev, priv->tx_tail %
751 priv->tx_max);
752 priv->tx_tail++;
753 stats->tx_packets++;
754 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
755 }
756 can_led_event(ndev, CAN_LED_EVENT_TX);
757 netif_wake_queue(ndev);
758}
759
760/**
761 * xcan_interrupt - CAN Isr
762 * @irq: irq number
763 * @dev_id:	device id pointer
764 *
765 * This is the Xilinx CAN ISR. It checks the type of interrupt
766 * and invokes the corresponding handler.
767 *
768 * Return:
769 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
770 */
771static irqreturn_t xcan_interrupt(int irq, void *dev_id)
772{
773 struct net_device *ndev = (struct net_device *)dev_id;
774 struct xcan_priv *priv = netdev_priv(ndev);
775 u32 isr, ier;
776
777 /* Get the interrupt status from Xilinx CAN */
778 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
779 if (!isr)
780 return IRQ_NONE;
781
782	/* Check the type of interrupt and process it */
783 if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
784 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
785 XCAN_IXR_WKUP_MASK));
786 xcan_state_interrupt(ndev, isr);
787 }
788
789	/* Check for TX interrupt and process it */
790 if (isr & XCAN_IXR_TXOK_MASK)
791 xcan_tx_interrupt(ndev, isr);
792
793	/* Check for error interrupts and process them */
794 if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
795 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
796 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
797 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
798 XCAN_IXR_ARBLST_MASK));
799 xcan_err_interrupt(ndev, isr);
800 }
801
802	/* Check for receive interrupts and process them */
803 if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
804 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
805 ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
806 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
807 napi_schedule(&priv->napi);
808 }
809 return IRQ_HANDLED;
810}
811
812/**
813 * xcan_chip_stop - Driver stop routine
814 * @ndev: Pointer to net_device structure
815 *
816 * This is the driver's stop routine. It disables the
817 * interrupts and puts the device into configuration mode.
818 */
819static void xcan_chip_stop(struct net_device *ndev)
820{
821 struct xcan_priv *priv = netdev_priv(ndev);
822 u32 ier;
823
824	/* Disable interrupts and leave the CAN device in configuration mode */
825 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
826 ier &= ~XCAN_INTR_ALL;
827 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
828 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
829 priv->can.state = CAN_STATE_STOPPED;
830}
831
832/**
833 * xcan_open - Driver open routine
834 * @ndev: Pointer to net_device structure
835 *
836 * This is the driver open routine.
837 * Return: 0 on success and failure value on error
838 */
839static int xcan_open(struct net_device *ndev)
840{
841 struct xcan_priv *priv = netdev_priv(ndev);
842 int ret;
843
844 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
845 ndev->name, ndev);
846 if (ret < 0) {
847 netdev_err(ndev, "irq allocation for CAN failed\n");
848 goto err;
849 }
850
851 ret = clk_prepare_enable(priv->can_clk);
852 if (ret) {
853 netdev_err(ndev, "unable to enable device clock\n");
854 goto err_irq;
855 }
856
857 ret = clk_prepare_enable(priv->bus_clk);
858 if (ret) {
859 netdev_err(ndev, "unable to enable bus clock\n");
860 goto err_can_clk;
861 }
862
863 /* Set chip into reset mode */
864 ret = set_reset_mode(ndev);
865 if (ret < 0) {
866 netdev_err(ndev, "mode resetting failed!\n");
867 goto err_bus_clk;
868 }
869
870 /* Common open */
871 ret = open_candev(ndev);
872 if (ret)
873 goto err_bus_clk;
874
875 ret = xcan_chip_start(ndev);
876 if (ret < 0) {
877 netdev_err(ndev, "xcan_chip_start failed!\n");
878 goto err_candev;
879 }
880
881 can_led_event(ndev, CAN_LED_EVENT_OPEN);
882 napi_enable(&priv->napi);
883 netif_start_queue(ndev);
884
885 return 0;
886
887err_candev:
888 close_candev(ndev);
889err_bus_clk:
890 clk_disable_unprepare(priv->bus_clk);
891err_can_clk:
892 clk_disable_unprepare(priv->can_clk);
893err_irq:
894 free_irq(ndev->irq, ndev);
895err:
896 return ret;
897}
898
899/**
900 * xcan_close - Driver close routine
901 * @ndev: Pointer to net_device structure
902 *
903 * Return: 0 always
904 */
905static int xcan_close(struct net_device *ndev)
906{
907 struct xcan_priv *priv = netdev_priv(ndev);
908
909 netif_stop_queue(ndev);
910 napi_disable(&priv->napi);
911 xcan_chip_stop(ndev);
912 clk_disable_unprepare(priv->bus_clk);
913 clk_disable_unprepare(priv->can_clk);
914 free_irq(ndev->irq, ndev);
915 close_candev(ndev);
916
917 can_led_event(ndev, CAN_LED_EVENT_STOP);
918
919 return 0;
920}
921
922/**
923 * xcan_get_berr_counter - error counter routine
924 * @ndev: Pointer to net_device structure
925 * @bec: Pointer to can_berr_counter structure
926 *
927 * This is the driver error counter routine.
928 * Return: 0 on success and failure value on error
929 */
930static int xcan_get_berr_counter(const struct net_device *ndev,
931 struct can_berr_counter *bec)
932{
933 struct xcan_priv *priv = netdev_priv(ndev);
934 int ret;
935
936 ret = clk_prepare_enable(priv->can_clk);
937 if (ret)
938 goto err;
939
940 ret = clk_prepare_enable(priv->bus_clk);
941 if (ret)
942 goto err_clk;
943
944 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
945 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
946 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
947
948 clk_disable_unprepare(priv->bus_clk);
949 clk_disable_unprepare(priv->can_clk);
950
951 return 0;
952
953err_clk:
954 clk_disable_unprepare(priv->can_clk);
955err:
956 return ret;
957}
958
959
960static const struct net_device_ops xcan_netdev_ops = {
961 .ndo_open = xcan_open,
962 .ndo_stop = xcan_close,
963 .ndo_start_xmit = xcan_start_xmit,
964};
965
966/**
967 * xcan_suspend - Suspend method for the driver
968 * @dev:	Address of the device structure
969 *
970 * Put the driver into low power mode.
971 * Return: 0 always
972 */
973static int __maybe_unused xcan_suspend(struct device *dev)
974{
975	struct net_device *ndev = dev_get_drvdata(dev);
977 struct xcan_priv *priv = netdev_priv(ndev);
978
979 if (netif_running(ndev)) {
980 netif_stop_queue(ndev);
981 netif_device_detach(ndev);
982 }
983
984 priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
985 priv->can.state = CAN_STATE_SLEEPING;
986
987 clk_disable(priv->bus_clk);
988 clk_disable(priv->can_clk);
989
990 return 0;
991}
992
993/**
994 * xcan_resume - Resume from suspend
995 * @dev:	Address of the device structure
996 *
997 * Resume operation after suspend.
998 * Return: 0 on success and failure value on error
999 */
1000static int __maybe_unused xcan_resume(struct device *dev)
1001{
1002	struct net_device *ndev = dev_get_drvdata(dev);
1004 struct xcan_priv *priv = netdev_priv(ndev);
1005 int ret;
1006
1007 ret = clk_enable(priv->bus_clk);
1008 if (ret) {
1009		dev_err(dev, "Cannot enable bus clock.\n");
1010 return ret;
1011 }
1012 ret = clk_enable(priv->can_clk);
1013 if (ret) {
1014		dev_err(dev, "Cannot enable CAN clock.\n");
1015		clk_disable(priv->bus_clk);
1016 return ret;
1017 }
1018
1019 priv->write_reg(priv, XCAN_MSR_OFFSET, 0);
1020 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
1021 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1022
1023 if (netif_running(ndev)) {
1024 netif_device_attach(ndev);
1025 netif_start_queue(ndev);
1026 }
1027
1028 return 0;
1029}
1030
1031static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
1032
1033/**
1034 * xcan_probe - Platform registration call
1035 * @pdev: Handle to the platform device structure
1036 *
1037 * This function does all the memory allocation and registration for the CAN
1038 * device.
1039 *
1040 * Return: 0 on success and failure value on error
1041 */
1042static int xcan_probe(struct platform_device *pdev)
1043{
1044 struct resource *res; /* IO mem resources */
1045 struct net_device *ndev;
1046 struct xcan_priv *priv;
1047 void __iomem *addr;
1048 int ret, rx_max, tx_max;
1049
1050 /* Get the virtual base address for the device */
1051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1052 addr = devm_ioremap_resource(&pdev->dev, res);
1053 if (IS_ERR(addr)) {
1054 ret = PTR_ERR(addr);
1055 goto err;
1056 }
1057
1058 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
1059 if (ret < 0)
1060 goto err;
1061
1062 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
1063 if (ret < 0)
1064 goto err;
1065
1066 /* Create a CAN device instance */
1067 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1068 if (!ndev)
1069 return -ENOMEM;
1070
1071 priv = netdev_priv(ndev);
1072 priv->dev = ndev;
1073 priv->can.bittiming_const = &xcan_bittiming_const;
1074 priv->can.do_set_mode = xcan_do_set_mode;
1075 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1076 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1077 CAN_CTRLMODE_BERR_REPORTING;
1078 priv->reg_base = addr;
1079 priv->tx_max = tx_max;
1080
1081 /* Get IRQ for the device */
1082 ndev->irq = platform_get_irq(pdev, 0);
1083 ndev->flags |= IFF_ECHO; /* We support local echo */
1084
1085 platform_set_drvdata(pdev, ndev);
1086 SET_NETDEV_DEV(ndev, &pdev->dev);
1087 ndev->netdev_ops = &xcan_netdev_ops;
1088
1089	/* Get the CAN device clock (can_clk) */
1090 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1091 if (IS_ERR(priv->can_clk)) {
1092 dev_err(&pdev->dev, "Device clock not found.\n");
1093 ret = PTR_ERR(priv->can_clk);
1094 goto err_free;
1095 }
1096 /* Check for type of CAN device */
1097 if (of_device_is_compatible(pdev->dev.of_node,
1098 "xlnx,zynq-can-1.0")) {
1099 priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
1100 if (IS_ERR(priv->bus_clk)) {
1101 dev_err(&pdev->dev, "bus clock not found\n");
1102 ret = PTR_ERR(priv->bus_clk);
1103 goto err_free;
1104 }
1105 } else {
1106 priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1107 if (IS_ERR(priv->bus_clk)) {
1108 dev_err(&pdev->dev, "bus clock not found\n");
1109 ret = PTR_ERR(priv->bus_clk);
1110 goto err_free;
1111 }
1112 }
1113
1114 ret = clk_prepare_enable(priv->can_clk);
1115 if (ret) {
1116 dev_err(&pdev->dev, "unable to enable device clock\n");
1117 goto err_free;
1118 }
1119
1120 ret = clk_prepare_enable(priv->bus_clk);
1121 if (ret) {
1122 dev_err(&pdev->dev, "unable to enable bus clock\n");
1123 goto err_unprepare_disable_dev;
1124 }
1125
1126 priv->write_reg = xcan_write_reg_le;
1127 priv->read_reg = xcan_read_reg_le;
1128
1129 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1130 priv->write_reg = xcan_write_reg_be;
1131 priv->read_reg = xcan_read_reg_be;
1132 }
1133
1134 priv->can.clock.freq = clk_get_rate(priv->can_clk);
1135
1136 netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1137
1138 ret = register_candev(ndev);
1139 if (ret) {
1140		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
1141 goto err_unprepare_disable_busclk;
1142 }
1143
1144 devm_can_led_init(ndev);
1145 clk_disable_unprepare(priv->bus_clk);
1146 clk_disable_unprepare(priv->can_clk);
1147 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
1148 priv->reg_base, ndev->irq, priv->can.clock.freq,
1149 priv->tx_max);
1150
1151 return 0;
1152
1153err_unprepare_disable_busclk:
1154 clk_disable_unprepare(priv->bus_clk);
1155err_unprepare_disable_dev:
1156 clk_disable_unprepare(priv->can_clk);
1157err_free:
1158 free_candev(ndev);
1159err:
1160 return ret;
1161}
1162
1163/**
1164 * xcan_remove - Unregister the device after releasing the resources
1165 * @pdev: Handle to the platform device structure
1166 *
1167 * This function frees all the resources allocated to the device.
1168 * Return: 0 always
1169 */
1170static int xcan_remove(struct platform_device *pdev)
1171{
1172 struct net_device *ndev = platform_get_drvdata(pdev);
1173 struct xcan_priv *priv = netdev_priv(ndev);
1174
1175 if (set_reset_mode(ndev) < 0)
1176 netdev_err(ndev, "mode resetting failed!\n");
1177
1178 unregister_candev(ndev);
1179 netif_napi_del(&priv->napi);
1180 free_candev(ndev);
1181
1182 return 0;
1183}
1184
1185/* Match table for OF platform binding */
1186static const struct of_device_id xcan_of_match[] = {
1187 { .compatible = "xlnx,zynq-can-1.0", },
1188 { .compatible = "xlnx,axi-can-1.00.a", },
1189 { /* end of list */ },
1190};
1191MODULE_DEVICE_TABLE(of, xcan_of_match);
1192
1193static struct platform_driver xcan_driver = {
1194 .probe = xcan_probe,
1195 .remove = xcan_remove,
1196 .driver = {
1197 .owner = THIS_MODULE,
1198 .name = DRIVER_NAME,
1199 .pm = &xcan_dev_pm_ops,
1200 .of_match_table = xcan_of_match,
1201 },
1202};
1203
1204module_platform_driver(xcan_driver);
1205
1206MODULE_LICENSE("GPL");
1207MODULE_AUTHOR("Xilinx Inc");
1208MODULE_DESCRIPTION("Xilinx CAN interface");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 41ee5b6ae917..69c42513dd72 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int i;
 	int ret;
 
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index dadfafba64e9..953bc6a49e59 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 
 static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int addr = REG_PORT(p);
 	u16 val;
 
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6131_setup(struct dsa_switch *ds)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int i;
 	int ret;
 
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 17314ed9456d..9ce2146346b6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
 
 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
 
 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
 
 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
 
 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
 	/* Schedule a timer to re-enable the PHY polling unit. */
 	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
 
 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
 	mutex_init(&ps->ppu_mutex);
 	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 				 int nr_stats, struct mv88e6xxx_hw_stat *stats,
 				 int port, uint64_t *data)
 {
-	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 	int i;
 
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 35df0b9e6848..a968654b631d 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
 	/* The EL3-specific entries in the device structure. */
 	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 
 	err = register_netdev(dev);
 	if (err) {
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 063557e037f2..f18647c23559 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
 	dev->netdev_ops = &el3_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+	dev->ethtool_ops = &netdev_ethtool_ops;
 
 	return tc589_config(link);
 }
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 465cc7108d8a..e13b04624ded 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-	SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+	dev->ethtool_ops = &typhoon_ethtool_ops;
 
 	/* We can handle scatter gather, up to 16 entries, and
 	 * we can do IP checksumming (only version 4, doh...)
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 455d4c399b52..1d162ccb4733 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
 
 	/* This check _should_not_ be necessary, omit eventually. */
 	while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
-		if (jiffies - reset_start_time > 2 * HZ / 100) {
+		if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
 			netdev_warn(dev, "%s: did not complete.\n", __func__);
 			break;
 		}
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
 	dma_start = jiffies;
 
 	while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
-		if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+		if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
 			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
 			ax_reset_8390(dev);
 			ax_NS8390_init(dev, 1);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 051349458462..edb718661850 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -68,6 +68,7 @@ source "drivers/net/ethernet/neterion/Kconfig"
 source "drivers/net/ethernet/faraday/Kconfig"
 source "drivers/net/ethernet/freescale/Kconfig"
 source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/hisilicon/Kconfig"
 source "drivers/net/ethernet/hp/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 35190e36c456..58de3339ab3c 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
 obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
 obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
 obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
 obj-$(CONFIG_NET_VENDOR_HP) += hp/
 obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 171d73c1d3c2..40dbbf740331 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
 
 	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 
 	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
 
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 1517e9df5ba1..9a6991be9749 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
 	dev->watchdog_timeo = 5*HZ;
 
 	dev->netdev_ops = &ace_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+	dev->ethtool_ops = &ace_ethtool_ops;
 
 	/* we only display this string ONCE */
 	if (!boards_found)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 99cc56f451cf..580553d42d34 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -353,7 +353,6 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 
 	struct sgdma_descrip __iomem *cdesc = &descbase[0];
 	struct sgdma_descrip __iomem *ndesc = &descbase[1];
-
 	struct tse_buffer *rxbuffer = NULL;
 
 	if (!sgdma_rxbusy(priv)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 54c25eff7952..be72e1e64525 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -271,5 +271,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
 
 void altera_tse_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+	netdev->ethtool_ops = &tse_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 562df46e0a82..bbaf36d9f5e1 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
 	default y
 	depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
 		   SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
-		   (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
+		   (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
 	---help---
 	  If you have a network (Ethernet) chipset belonging to this class,
 	  say Y.
@@ -177,4 +177,16 @@ config SUNLANCE
 	  To compile this driver as a module, choose M here: the module
 	  will be called sunlance.
 
+config AMD_XGBE
+	tristate "AMD 10GbE Ethernet driver"
+	depends on OF_NET
+	select PHYLIB
+	select AMD_XGBE_PHY
+	---help---
+	  This driver supports the AMD 10GbE Ethernet device found on an
+	  AMD SoC.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called amd-xgbe.
+
 endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index cdd4301a973d..a38a2dce3eb3 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NI65) += ni65.o
 obj-$(CONFIG_PCNET32) += pcnet32.o
 obj-$(CONFIG_SUN3LANCE) += sun3lance.o
 obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 26efaaa5e73f..068dc7cad5fa 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
 	/* Initialize driver entry points */
 	dev->netdev_ops = &amd8111e_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &ops);
+	dev->ethtool_ops = &ops;
 	dev->irq =pdev->irq;
 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
 	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index b08101b31b8b..968b7bfac8fc 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
 	unsigned long mem_start = board + ARIADNE_RAM;
 	struct resource *r1, *r2;
 	struct net_device *dev;
-	struct ariadne_private *priv;
 	u32 serial;
 	int err;
 
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
 		return -ENOMEM;
 	}
 
-	priv = netdev_priv(dev);
-
 	r1->name = dev->name;
 	r2->name = dev->name;
 
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index a2bd91e3d302..a78e4c136959 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
 	dev->base_addr = base->start;
 	dev->irq = irq;
 	dev->netdev_ops = &au1000_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+	dev->ethtool_ops = &au1000_ethtool_ops;
 	dev->watchdog_timeo = ETH_TX_TIMEOUT;
 
 	/*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 47ce57c2c893..6c9de117ffc6 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -27,9 +27,9 @@
 
 #include "hplance.h"
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 16392 bytes of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0e8399dec054..0660ac5846bb 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -26,9 +26,9 @@
 #include <asm/pgtable.h>
 #include <asm/mvme147hw.h>
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 32K of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
 	       dev->dev_addr);
 
 	lp = netdev_priv(dev);
-	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 16K */
+	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 32K */
 	if (!lp->ram) {
 		printk("%s: No memory for LANCE buffers\n", dev->name);
 		free_netdev(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 08569fe2b182..abf3b1581c82 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
 	lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
 
 	dev->netdev_ops = &mace_netdev_ops;
-	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+	dev->ethtool_ops = &netdev_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	return nmclan_config(link);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 000000000000..26cf9af1642f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,6 @@
1obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
2
3amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
4 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
5
6amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 000000000000..bf462ee86f5c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1007 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#ifndef __XGBE_COMMON_H__
118#define __XGBE_COMMON_H__
119
120/* DMA register offsets */
121#define DMA_MR 0x3000
122#define DMA_SBMR 0x3004
123#define DMA_ISR 0x3008
124#define DMA_AXIARCR 0x3010
125#define DMA_AXIAWCR 0x3018
126#define DMA_DSR0 0x3020
127#define DMA_DSR1 0x3024
128#define DMA_DSR2 0x3028
129#define DMA_DSR3 0x302c
130#define DMA_DSR4 0x3030
131
132/* DMA register entry bit positions and sizes */
133#define DMA_AXIARCR_DRC_INDEX 0
134#define DMA_AXIARCR_DRC_WIDTH 4
135#define DMA_AXIARCR_DRD_INDEX 4
136#define DMA_AXIARCR_DRD_WIDTH 2
137#define DMA_AXIARCR_TEC_INDEX 8
138#define DMA_AXIARCR_TEC_WIDTH 4
139#define DMA_AXIARCR_TED_INDEX 12
140#define DMA_AXIARCR_TED_WIDTH 2
141#define DMA_AXIARCR_THC_INDEX 16
142#define DMA_AXIARCR_THC_WIDTH 4
143#define DMA_AXIARCR_THD_INDEX 20
144#define DMA_AXIARCR_THD_WIDTH 2
145#define DMA_AXIAWCR_DWC_INDEX 0
146#define DMA_AXIAWCR_DWC_WIDTH 4
147#define DMA_AXIAWCR_DWD_INDEX 4
148#define DMA_AXIAWCR_DWD_WIDTH 2
149#define DMA_AXIAWCR_RPC_INDEX 8
150#define DMA_AXIAWCR_RPC_WIDTH 4
151#define DMA_AXIAWCR_RPD_INDEX 12
152#define DMA_AXIAWCR_RPD_WIDTH 2
153#define DMA_AXIAWCR_RHC_INDEX 16
154#define DMA_AXIAWCR_RHC_WIDTH 4
155#define DMA_AXIAWCR_RHD_INDEX 20
156#define DMA_AXIAWCR_RHD_WIDTH 2
157#define DMA_AXIAWCR_TDC_INDEX 24
158#define DMA_AXIAWCR_TDC_WIDTH 4
159#define DMA_AXIAWCR_TDD_INDEX 28
160#define DMA_AXIAWCR_TDD_WIDTH 2
161#define DMA_DSR0_RPS_INDEX 8
162#define DMA_DSR0_RPS_WIDTH 4
163#define DMA_DSR0_TPS_INDEX 12
164#define DMA_DSR0_TPS_WIDTH 4
165#define DMA_ISR_MACIS_INDEX 17
166#define DMA_ISR_MACIS_WIDTH 1
167#define DMA_ISR_MTLIS_INDEX 16
168#define DMA_ISR_MTLIS_WIDTH 1
169#define DMA_MR_SWR_INDEX 0
170#define DMA_MR_SWR_WIDTH 1
171#define DMA_SBMR_EAME_INDEX 11
172#define DMA_SBMR_EAME_WIDTH 1
173#define DMA_SBMR_UNDEF_INDEX 0
174#define DMA_SBMR_UNDEF_WIDTH 1
175
176/* DMA channel register offsets
177 * Multiple channels can be active. The first channel has registers
178 * that begin at 0x3100. Each subsequent channel has registers that
179 * are accessed using an offset of 0x80 from the previous channel.
180 */
181#define DMA_CH_BASE 0x3100
182#define DMA_CH_INC 0x80
183
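/* Illustrative sketch only (not from this header): given the layout
 * described above, the address of per-channel register 'reg' for channel
 * 'ch' would be:
 *
 *	#define XGBE_DMA_CH_REG(ch, reg) \
 *		(DMA_CH_BASE + (ch) * DMA_CH_INC + (reg))
 */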
184#define DMA_CH_CR 0x00
185#define DMA_CH_TCR 0x04
186#define DMA_CH_RCR 0x08
187#define DMA_CH_TDLR_HI 0x10
188#define DMA_CH_TDLR_LO 0x14
189#define DMA_CH_RDLR_HI 0x18
190#define DMA_CH_RDLR_LO 0x1c
191#define DMA_CH_TDTR_LO 0x24
192#define DMA_CH_RDTR_LO 0x2c
193#define DMA_CH_TDRLR 0x30
194#define DMA_CH_RDRLR 0x34
195#define DMA_CH_IER 0x38
196#define DMA_CH_RIWT 0x3c
197#define DMA_CH_CATDR_LO 0x44
198#define DMA_CH_CARDR_LO 0x4c
199#define DMA_CH_CATBR_HI 0x50
200#define DMA_CH_CATBR_LO 0x54
201#define DMA_CH_CARBR_HI 0x58
202#define DMA_CH_CARBR_LO 0x5c
203#define DMA_CH_SR 0x60
204
205/* DMA channel register entry bit positions and sizes */
206#define DMA_CH_CR_PBLX8_INDEX 16
207#define DMA_CH_CR_PBLX8_WIDTH 1
208#define DMA_CH_IER_AIE_INDEX 15
209#define DMA_CH_IER_AIE_WIDTH 1
210#define DMA_CH_IER_FBEE_INDEX 12
211#define DMA_CH_IER_FBEE_WIDTH 1
212#define DMA_CH_IER_NIE_INDEX 16
213#define DMA_CH_IER_NIE_WIDTH 1
214#define DMA_CH_IER_RBUE_INDEX 7
215#define DMA_CH_IER_RBUE_WIDTH 1
216#define DMA_CH_IER_RIE_INDEX 6
217#define DMA_CH_IER_RIE_WIDTH 1
218#define DMA_CH_IER_RSE_INDEX 8
219#define DMA_CH_IER_RSE_WIDTH 1
220#define DMA_CH_IER_TBUE_INDEX 2
221#define DMA_CH_IER_TBUE_WIDTH 1
222#define DMA_CH_IER_TIE_INDEX 0
223#define DMA_CH_IER_TIE_WIDTH 1
224#define DMA_CH_IER_TXSE_INDEX 1
225#define DMA_CH_IER_TXSE_WIDTH 1
226#define DMA_CH_RCR_PBL_INDEX 16
227#define DMA_CH_RCR_PBL_WIDTH 6
228#define DMA_CH_RCR_RBSZ_INDEX 1
229#define DMA_CH_RCR_RBSZ_WIDTH 14
230#define DMA_CH_RCR_SR_INDEX 0
231#define DMA_CH_RCR_SR_WIDTH 1
232#define DMA_CH_RIWT_RWT_INDEX 0
233#define DMA_CH_RIWT_RWT_WIDTH 8
234#define DMA_CH_SR_FBE_INDEX 12
235#define DMA_CH_SR_FBE_WIDTH 1
236#define DMA_CH_SR_RBU_INDEX 7
237#define DMA_CH_SR_RBU_WIDTH 1
238#define DMA_CH_SR_RI_INDEX 6
239#define DMA_CH_SR_RI_WIDTH 1
240#define DMA_CH_SR_RPS_INDEX 8
241#define DMA_CH_SR_RPS_WIDTH 1
242#define DMA_CH_SR_TBU_INDEX 2
243#define DMA_CH_SR_TBU_WIDTH 1
244#define DMA_CH_SR_TI_INDEX 0
245#define DMA_CH_SR_TI_WIDTH 1
246#define DMA_CH_SR_TPS_INDEX 1
247#define DMA_CH_SR_TPS_WIDTH 1
248#define DMA_CH_TCR_OSP_INDEX 4
249#define DMA_CH_TCR_OSP_WIDTH 1
250#define DMA_CH_TCR_PBL_INDEX 16
251#define DMA_CH_TCR_PBL_WIDTH 6
252#define DMA_CH_TCR_ST_INDEX 0
253#define DMA_CH_TCR_ST_WIDTH 1
254#define DMA_CH_TCR_TSE_INDEX 12
255#define DMA_CH_TCR_TSE_WIDTH 1
256
257/* DMA channel register values */
258#define DMA_OSP_DISABLE 0x00
259#define DMA_OSP_ENABLE 0x01
260#define DMA_PBL_1 1
261#define DMA_PBL_2 2
262#define DMA_PBL_4 4
263#define DMA_PBL_8 8
264#define DMA_PBL_16 16
265#define DMA_PBL_32 32
266#define DMA_PBL_64 64 /* 8 x 8 */
267#define DMA_PBL_128 128 /* 8 x 16 */
268#define DMA_PBL_256 256 /* 8 x 32 */
269#define DMA_PBL_X8_DISABLE 0x00
270#define DMA_PBL_X8_ENABLE 0x01
271
272
273/* MAC register offsets */
274#define MAC_TCR 0x0000
275#define MAC_RCR 0x0004
276#define MAC_PFR 0x0008
277#define MAC_WTR 0x000c
278#define MAC_HTR0 0x0010
279#define MAC_HTR1 0x0014
280#define MAC_HTR2 0x0018
281#define MAC_HTR3 0x001c
282#define MAC_HTR4 0x0020
283#define MAC_HTR5 0x0024
284#define MAC_HTR6 0x0028
285#define MAC_HTR7 0x002c
286#define MAC_VLANTR 0x0050
287#define MAC_VLANHTR 0x0058
288#define MAC_VLANIR 0x0060
289#define MAC_IVLANIR 0x0064
290#define MAC_RETMR 0x006c
291#define MAC_Q0TFCR 0x0070
292#define MAC_RFCR 0x0090
293#define MAC_RQC0R 0x00a0
294#define MAC_RQC1R 0x00a4
295#define MAC_RQC2R 0x00a8
296#define MAC_RQC3R 0x00ac
297#define MAC_ISR 0x00b0
298#define MAC_IER 0x00b4
299#define MAC_RTSR 0x00b8
300#define MAC_PMTCSR 0x00c0
301#define MAC_RWKPFR 0x00c4
302#define MAC_LPICSR 0x00d0
303#define MAC_LPITCR 0x00d4
304#define MAC_VR 0x0110
305#define MAC_DR 0x0114
306#define MAC_HWF0R 0x011c
307#define MAC_HWF1R 0x0120
308#define MAC_HWF2R 0x0124
309#define MAC_GPIOCR 0x0278
310#define MAC_GPIOSR 0x027c
311#define MAC_MACA0HR 0x0300
312#define MAC_MACA0LR 0x0304
313#define MAC_MACA1HR 0x0308
314#define MAC_MACA1LR 0x030c
315
316#define MAC_QTFCR_INC 4
317#define MAC_MACA_INC 4
318
319/* MAC register entry bit positions and sizes */
320#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
321#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
322#define MAC_HWF0R_ARPOFFSEL_INDEX 9
323#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
324#define MAC_HWF0R_EEESEL_INDEX 13
325#define MAC_HWF0R_EEESEL_WIDTH 1
326#define MAC_HWF0R_GMIISEL_INDEX 1
327#define MAC_HWF0R_GMIISEL_WIDTH 1
328#define MAC_HWF0R_MGKSEL_INDEX 7
329#define MAC_HWF0R_MGKSEL_WIDTH 1
330#define MAC_HWF0R_MMCSEL_INDEX 8
331#define MAC_HWF0R_MMCSEL_WIDTH 1
332#define MAC_HWF0R_RWKSEL_INDEX 6
333#define MAC_HWF0R_RWKSEL_WIDTH 1
334#define MAC_HWF0R_RXCOESEL_INDEX 16
335#define MAC_HWF0R_RXCOESEL_WIDTH 1
336#define MAC_HWF0R_SAVLANINS_INDEX 27
337#define MAC_HWF0R_SAVLANINS_WIDTH 1
338#define MAC_HWF0R_SMASEL_INDEX 5
339#define MAC_HWF0R_SMASEL_WIDTH 1
340#define MAC_HWF0R_TSSEL_INDEX 12
341#define MAC_HWF0R_TSSEL_WIDTH 1
342#define MAC_HWF0R_TSSTSSEL_INDEX 25
343#define MAC_HWF0R_TSSTSSEL_WIDTH 2
344#define MAC_HWF0R_TXCOESEL_INDEX 14
345#define MAC_HWF0R_TXCOESEL_WIDTH 1
346#define MAC_HWF0R_VLHASH_INDEX 4
347#define MAC_HWF0R_VLHASH_WIDTH 1
348#define MAC_HWF1R_ADVTHWORD_INDEX 13
349#define MAC_HWF1R_ADVTHWORD_WIDTH 1
350#define MAC_HWF1R_DBGMEMA_INDEX 19
351#define MAC_HWF1R_DBGMEMA_WIDTH 1
352#define MAC_HWF1R_DCBEN_INDEX 16
353#define MAC_HWF1R_DCBEN_WIDTH 1
354#define MAC_HWF1R_HASHTBLSZ_INDEX 24
355#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
356#define MAC_HWF1R_L3L4FNUM_INDEX 27
357#define MAC_HWF1R_L3L4FNUM_WIDTH 4
358#define MAC_HWF1R_RSSEN_INDEX 20
359#define MAC_HWF1R_RSSEN_WIDTH 1
360#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
361#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
362#define MAC_HWF1R_SPHEN_INDEX 17
363#define MAC_HWF1R_SPHEN_WIDTH 1
364#define MAC_HWF1R_TSOEN_INDEX 18
365#define MAC_HWF1R_TSOEN_WIDTH 1
366#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
367#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
368#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
369#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
370#define MAC_HWF2R_PPSOUTNUM_INDEX 24
371#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
372#define MAC_HWF2R_RXCHCNT_INDEX 12
373#define MAC_HWF2R_RXCHCNT_WIDTH 4
374#define MAC_HWF2R_RXQCNT_INDEX 0
375#define MAC_HWF2R_RXQCNT_WIDTH 4
376#define MAC_HWF2R_TXCHCNT_INDEX 18
377#define MAC_HWF2R_TXCHCNT_WIDTH 4
378#define MAC_HWF2R_TXQCNT_INDEX 6
379#define MAC_HWF2R_TXQCNT_WIDTH 4
380#define MAC_ISR_MMCRXIS_INDEX 9
381#define MAC_ISR_MMCRXIS_WIDTH 1
382#define MAC_ISR_MMCTXIS_INDEX 10
383#define MAC_ISR_MMCTXIS_WIDTH 1
384#define MAC_ISR_PMTIS_INDEX 4
385#define MAC_ISR_PMTIS_WIDTH 1
386#define MAC_MACA1HR_AE_INDEX 31
387#define MAC_MACA1HR_AE_WIDTH 1
388#define MAC_PFR_HMC_INDEX 2
389#define MAC_PFR_HMC_WIDTH 1
390#define MAC_PFR_HUC_INDEX 1
391#define MAC_PFR_HUC_WIDTH 1
392#define MAC_PFR_PM_INDEX 4
393#define MAC_PFR_PM_WIDTH 1
394#define MAC_PFR_PR_INDEX 0
395#define MAC_PFR_PR_WIDTH 1
396#define MAC_PMTCSR_MGKPKTEN_INDEX 1
397#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
398#define MAC_PMTCSR_PWRDWN_INDEX 0
399#define MAC_PMTCSR_PWRDWN_WIDTH 1
400#define MAC_PMTCSR_RWKFILTRST_INDEX 31
401#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
402#define MAC_PMTCSR_RWKPKTEN_INDEX 2
403#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
404#define MAC_Q0TFCR_PT_INDEX 16
405#define MAC_Q0TFCR_PT_WIDTH 16
406#define MAC_Q0TFCR_TFE_INDEX 1
407#define MAC_Q0TFCR_TFE_WIDTH 1
408#define MAC_RCR_ACS_INDEX 1
409#define MAC_RCR_ACS_WIDTH 1
410#define MAC_RCR_CST_INDEX 2
411#define MAC_RCR_CST_WIDTH 1
412#define MAC_RCR_DCRCC_INDEX 3
413#define MAC_RCR_DCRCC_WIDTH 1
414#define MAC_RCR_IPC_INDEX 9
415#define MAC_RCR_IPC_WIDTH 1
416#define MAC_RCR_JE_INDEX 8
417#define MAC_RCR_JE_WIDTH 1
418#define MAC_RCR_LM_INDEX 10
419#define MAC_RCR_LM_WIDTH 1
420#define MAC_RCR_RE_INDEX 0
421#define MAC_RCR_RE_WIDTH 1
422#define MAC_RFCR_RFE_INDEX 0
423#define MAC_RFCR_RFE_WIDTH 1
424#define MAC_RQC0R_RXQ0EN_INDEX 0
425#define MAC_RQC0R_RXQ0EN_WIDTH 2
426#define MAC_TCR_SS_INDEX 29
427#define MAC_TCR_SS_WIDTH 2
428#define MAC_TCR_TE_INDEX 0
429#define MAC_TCR_TE_WIDTH 1
430#define MAC_VLANTR_DOVLTC_INDEX 20
431#define MAC_VLANTR_DOVLTC_WIDTH 1
432#define MAC_VLANTR_ERSVLM_INDEX 19
433#define MAC_VLANTR_ERSVLM_WIDTH 1
434#define MAC_VLANTR_ESVL_INDEX 18
435#define MAC_VLANTR_ESVL_WIDTH 1
436#define MAC_VLANTR_EVLS_INDEX 21
437#define MAC_VLANTR_EVLS_WIDTH 2
438#define MAC_VLANTR_EVLRXS_INDEX 24
439#define MAC_VLANTR_EVLRXS_WIDTH 1
440#define MAC_VR_DEVID_INDEX 8
441#define MAC_VR_DEVID_WIDTH 8
442#define MAC_VR_SNPSVER_INDEX 0
443#define MAC_VR_SNPSVER_WIDTH 8
444#define MAC_VR_USERVER_INDEX 16
445#define MAC_VR_USERVER_WIDTH 8
446
447/* MMC register offsets */
448#define MMC_CR 0x0800
449#define MMC_RISR 0x0804
450#define MMC_TISR 0x0808
451#define MMC_RIER 0x080c
452#define MMC_TIER 0x0810
453#define MMC_TXOCTETCOUNT_GB_LO 0x0814
454#define MMC_TXOCTETCOUNT_GB_HI 0x0818
455#define MMC_TXFRAMECOUNT_GB_LO 0x081c
456#define MMC_TXFRAMECOUNT_GB_HI 0x0820
457#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
458#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
459#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
460#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
461#define MMC_TX64OCTETS_GB_LO 0x0834
462#define MMC_TX64OCTETS_GB_HI 0x0838
463#define MMC_TX65TO127OCTETS_GB_LO 0x083c
464#define MMC_TX65TO127OCTETS_GB_HI 0x0840
465#define MMC_TX128TO255OCTETS_GB_LO 0x0844
466#define MMC_TX128TO255OCTETS_GB_HI 0x0848
467#define MMC_TX256TO511OCTETS_GB_LO 0x084c
468#define MMC_TX256TO511OCTETS_GB_HI 0x0850
469#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
470#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
471#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
472#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
473#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
474#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
475#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
476#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
477#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
478#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
479#define MMC_TXUNDERFLOWERROR_LO 0x087c
480#define MMC_TXUNDERFLOWERROR_HI 0x0880
481#define MMC_TXOCTETCOUNT_G_LO 0x0884
482#define MMC_TXOCTETCOUNT_G_HI 0x0888
483#define MMC_TXFRAMECOUNT_G_LO 0x088c
484#define MMC_TXFRAMECOUNT_G_HI 0x0890
485#define MMC_TXPAUSEFRAMES_LO 0x0894
486#define MMC_TXPAUSEFRAMES_HI 0x0898
487#define MMC_TXVLANFRAMES_G_LO 0x089c
488#define MMC_TXVLANFRAMES_G_HI 0x08a0
489#define MMC_RXFRAMECOUNT_GB_LO 0x0900
490#define MMC_RXFRAMECOUNT_GB_HI 0x0904
491#define MMC_RXOCTETCOUNT_GB_LO 0x0908
492#define MMC_RXOCTETCOUNT_GB_HI 0x090c
493#define MMC_RXOCTETCOUNT_G_LO 0x0910
494#define MMC_RXOCTETCOUNT_G_HI 0x0914
495#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
496#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
497#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
498#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
499#define MMC_RXCRCERROR_LO 0x0928
500#define MMC_RXCRCERROR_HI 0x092c
501#define MMC_RXRUNTERROR 0x0930
502#define MMC_RXJABBERERROR 0x0934
503#define MMC_RXUNDERSIZE_G 0x0938
504#define MMC_RXOVERSIZE_G 0x093c
505#define MMC_RX64OCTETS_GB_LO 0x0940
506#define MMC_RX64OCTETS_GB_HI 0x0944
507#define MMC_RX65TO127OCTETS_GB_LO 0x0948
508#define MMC_RX65TO127OCTETS_GB_HI 0x094c
509#define MMC_RX128TO255OCTETS_GB_LO 0x0950
510#define MMC_RX128TO255OCTETS_GB_HI 0x0954
511#define MMC_RX256TO511OCTETS_GB_LO 0x0958
512#define MMC_RX256TO511OCTETS_GB_HI 0x095c
513#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
514#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
515#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
516#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
517#define MMC_RXUNICASTFRAMES_G_LO 0x0970
518#define MMC_RXUNICASTFRAMES_G_HI 0x0974
519#define MMC_RXLENGTHERROR_LO 0x0978
520#define MMC_RXLENGTHERROR_HI 0x097c
521#define MMC_RXOUTOFRANGETYPE_LO 0x0980
522#define MMC_RXOUTOFRANGETYPE_HI 0x0984
523#define MMC_RXPAUSEFRAMES_LO 0x0988
524#define MMC_RXPAUSEFRAMES_HI 0x098c
525#define MMC_RXFIFOOVERFLOW_LO 0x0990
526#define MMC_RXFIFOOVERFLOW_HI 0x0994
527#define MMC_RXVLANFRAMES_GB_LO 0x0998
528#define MMC_RXVLANFRAMES_GB_HI 0x099c
529#define MMC_RXWATCHDOGERROR 0x09a0
530
531/* MMC register entry bit positions and sizes */
532#define MMC_CR_CR_INDEX 0
533#define MMC_CR_CR_WIDTH 1
534#define MMC_CR_CSR_INDEX 1
535#define MMC_CR_CSR_WIDTH 1
536#define MMC_CR_ROR_INDEX 2
537#define MMC_CR_ROR_WIDTH 1
538#define MMC_CR_MCF_INDEX 3
539#define MMC_CR_MCF_WIDTH 1
540#define MMC_CR_MCT_INDEX 4
541#define MMC_CR_MCT_WIDTH 2
542#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
543#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
544#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
545#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
546#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
547#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
548#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
549#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
550#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
551#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
552#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
553#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
554#define MMC_RISR_RXCRCERROR_INDEX 5
555#define MMC_RISR_RXCRCERROR_WIDTH 1
556#define MMC_RISR_RXRUNTERROR_INDEX 6
557#define MMC_RISR_RXRUNTERROR_WIDTH 1
558#define MMC_RISR_RXJABBERERROR_INDEX 7
559#define MMC_RISR_RXJABBERERROR_WIDTH 1
560#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
561#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
562#define MMC_RISR_RXOVERSIZE_G_INDEX 9
563#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
564#define MMC_RISR_RX64OCTETS_GB_INDEX 10
565#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
566#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
567#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
568#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
569#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
570#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
571#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
572#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
573#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
574#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
575#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
576#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
577#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
578#define MMC_RISR_RXLENGTHERROR_INDEX 17
579#define MMC_RISR_RXLENGTHERROR_WIDTH 1
580#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
581#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
582#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
583#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
584#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
585#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
586#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
587#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
588#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
589#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
590#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
591#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
592#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
593#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
594#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
595#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
596#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
597#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
598#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
599#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
600#define MMC_TISR_TX64OCTETS_GB_INDEX 4
601#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
602#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
603#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
604#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
605#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
606#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
607#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
608#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
609#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
610#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
611#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
612#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
613#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
614#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
615#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
616#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
617#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
618#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
619#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
620#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
621#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
622#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
623#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
624#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
625#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
626#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
627#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
628
629/* MTL register offsets */
630#define MTL_OMR 0x1000
631#define MTL_FDCR 0x1008
632#define MTL_FDSR 0x100c
633#define MTL_FDDR 0x1010
634#define MTL_ISR 0x1020
635#define MTL_RQDCM0R 0x1030
636#define MTL_TCPM0R 0x1040
637#define MTL_TCPM1R 0x1044
638
639#define MTL_RQDCM_INC 4
640#define MTL_RQDCM_Q_PER_REG 4
641
642/* MTL register entry bit positions and sizes */
643#define MTL_OMR_ETSALG_INDEX 5
644#define MTL_OMR_ETSALG_WIDTH 2
645#define MTL_OMR_RAA_INDEX 2
646#define MTL_OMR_RAA_WIDTH 1
647
648/* MTL queue register offsets
649 * Multiple queues can be active. The first queue has registers
650 * that begin at 0x1100. Each subsequent queue has registers that
651 * are accessed using an offset of 0x80 from the previous queue.
652 */
653#define MTL_Q_BASE 0x1100
654#define MTL_Q_INC 0x80
655
656#define MTL_Q_TQOMR 0x00
657#define MTL_Q_TQUR 0x04
658#define MTL_Q_TQDR 0x08
659#define MTL_Q_TCECR 0x10
660#define MTL_Q_TCESR 0x14
661#define MTL_Q_TCQWR 0x18
662#define MTL_Q_RQOMR 0x40
663#define MTL_Q_RQMPOCR 0x44
664#define MTL_Q_RQDR 0x4c
665#define MTL_Q_IER 0x70
666#define MTL_Q_ISR 0x74
667
668/* MTL queue register entry bit positions and sizes */
669#define MTL_Q_TCQWR_QW_INDEX 0
670#define MTL_Q_TCQWR_QW_WIDTH 21
671#define MTL_Q_RQOMR_EHFC_INDEX 7
672#define MTL_Q_RQOMR_EHFC_WIDTH 1
673#define MTL_Q_RQOMR_RFA_INDEX 8
674#define MTL_Q_RQOMR_RFA_WIDTH 3
675#define MTL_Q_RQOMR_RFD_INDEX 13
676#define MTL_Q_RQOMR_RFD_WIDTH 3
677#define MTL_Q_RQOMR_RQS_INDEX 16
678#define MTL_Q_RQOMR_RQS_WIDTH 9
679#define MTL_Q_RQOMR_RSF_INDEX 5
680#define MTL_Q_RQOMR_RSF_WIDTH 1
681#define MTL_Q_RQOMR_RTC_INDEX 0
682#define MTL_Q_RQOMR_RTC_WIDTH 2
683#define MTL_Q_TQOMR_FTQ_INDEX 0
684#define MTL_Q_TQOMR_FTQ_WIDTH 1
685#define MTL_Q_TQOMR_TQS_INDEX 16
686#define MTL_Q_TQOMR_TQS_WIDTH 10
687#define MTL_Q_TQOMR_TSF_INDEX 1
688#define MTL_Q_TQOMR_TSF_WIDTH 1
689#define MTL_Q_TQOMR_TTC_INDEX 4
690#define MTL_Q_TQOMR_TTC_WIDTH 3
691#define MTL_Q_TQOMR_TXQEN_INDEX 2
692#define MTL_Q_TQOMR_TXQEN_WIDTH 2
693
694/* MTL queue register value */
695#define MTL_RSF_DISABLE 0x00
696#define MTL_RSF_ENABLE 0x01
697#define MTL_TSF_DISABLE 0x00
698#define MTL_TSF_ENABLE 0x01
699
700#define MTL_RX_THRESHOLD_64 0x00
701#define MTL_RX_THRESHOLD_96 0x02
702#define MTL_RX_THRESHOLD_128 0x03
703#define MTL_TX_THRESHOLD_32 0x01
704#define MTL_TX_THRESHOLD_64 0x00
705#define MTL_TX_THRESHOLD_96 0x02
706#define MTL_TX_THRESHOLD_128 0x03
707#define MTL_TX_THRESHOLD_192 0x04
708#define MTL_TX_THRESHOLD_256 0x05
709#define MTL_TX_THRESHOLD_384 0x06
710#define MTL_TX_THRESHOLD_512 0x07
711
712#define MTL_ETSALG_WRR 0x00
713#define MTL_ETSALG_WFQ 0x01
714#define MTL_ETSALG_DWRR 0x02
715#define MTL_RAA_SP 0x00
716#define MTL_RAA_WSP 0x01
717
718#define MTL_Q_DISABLED 0x00
719#define MTL_Q_ENABLED 0x02
720
721
722/* MTL traffic class register offsets
723 * Multiple traffic classes can be active. The first class has registers
724 * that begin at 0x1100. Each subsequent class has registers that
725 * are accessed using an offset of 0x80 from the previous class.
726 */
727#define MTL_TC_BASE MTL_Q_BASE
728#define MTL_TC_INC MTL_Q_INC
729
730#define MTL_TC_ETSCR 0x10
731
732/* MTL traffic class register entry bit positions and sizes */
733#define MTL_TC_ETSCR_TSA_INDEX 0
734#define MTL_TC_ETSCR_TSA_WIDTH 2
735
736/* MTL traffic class register value */
737#define MTL_TSA_SP 0x00
738#define MTL_TSA_ETS 0x02
739
740
741/* PCS MMD select register offset
742 * The MMD select register is used for accessing PCS registers
743 * when the underlying APB3 interface is using indirect addressing.
744 * Indirect addressing requires accessing registers in two phases,
745 * an address phase and a data phase. The address phase requires
746 * writing an address selection value to the MMD select register.
747 */
748#define PCS_MMD_SELECT 0xff
749
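A rough sketch of the two-phase access described above, assuming the upper address bits are latched through PCS_MMD_SELECT and the low byte indexes into the selected window; the 8-bit split and the 32-bit register stride (the << 2) are assumptions here, not guarantees from this header:

static unsigned int xpcs_indirect_read(void __iomem *xpcs_regs,
				       unsigned int mmd_address)
{
	/* Address phase: latch the upper address bits (assumed split) */
	iowrite32(mmd_address >> 8, xpcs_regs + (PCS_MMD_SELECT << 2));

	/* Data phase: the low byte indexes into the selected window */
	return ioread32(xpcs_regs + ((mmd_address & 0xff) << 2));
}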
750
751/* Descriptor/Packet entry bit positions and sizes */
752#define RX_PACKET_ERRORS_CRC_INDEX 2
753#define RX_PACKET_ERRORS_CRC_WIDTH 1
754#define RX_PACKET_ERRORS_FRAME_INDEX 3
755#define RX_PACKET_ERRORS_FRAME_WIDTH 1
756#define RX_PACKET_ERRORS_LENGTH_INDEX 0
757#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
758#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
759#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
760
761#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
762#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
763#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
764#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
765#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
766#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
767
768#define RX_NORMAL_DESC0_OVT_INDEX 0
769#define RX_NORMAL_DESC0_OVT_WIDTH 16
770#define RX_NORMAL_DESC3_ES_INDEX 15
771#define RX_NORMAL_DESC3_ES_WIDTH 1
772#define RX_NORMAL_DESC3_ETLT_INDEX 16
773#define RX_NORMAL_DESC3_ETLT_WIDTH 4
774#define RX_NORMAL_DESC3_INTE_INDEX 30
775#define RX_NORMAL_DESC3_INTE_WIDTH 1
776#define RX_NORMAL_DESC3_LD_INDEX 28
777#define RX_NORMAL_DESC3_LD_WIDTH 1
778#define RX_NORMAL_DESC3_OWN_INDEX 31
779#define RX_NORMAL_DESC3_OWN_WIDTH 1
780#define RX_NORMAL_DESC3_PL_INDEX 0
781#define RX_NORMAL_DESC3_PL_WIDTH 14
782
783#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
784#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
785#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
786#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
787#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
788#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
789
790#define TX_CONTEXT_DESC2_MSS_INDEX 0
791#define TX_CONTEXT_DESC2_MSS_WIDTH 15
792#define TX_CONTEXT_DESC3_CTXT_INDEX 30
793#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
794#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
795#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
796#define TX_CONTEXT_DESC3_VLTV_INDEX 16
797#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
798#define TX_CONTEXT_DESC3_VT_INDEX 0
799#define TX_CONTEXT_DESC3_VT_WIDTH 16
800
801#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
802#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
803#define TX_NORMAL_DESC2_IC_INDEX 31
804#define TX_NORMAL_DESC2_IC_WIDTH 1
805#define TX_NORMAL_DESC2_VTIR_INDEX 14
806#define TX_NORMAL_DESC2_VTIR_WIDTH 2
807#define TX_NORMAL_DESC3_CIC_INDEX 16
808#define TX_NORMAL_DESC3_CIC_WIDTH 2
809#define TX_NORMAL_DESC3_CPC_INDEX 26
810#define TX_NORMAL_DESC3_CPC_WIDTH 2
811#define TX_NORMAL_DESC3_CTXT_INDEX 30
812#define TX_NORMAL_DESC3_CTXT_WIDTH 1
813#define TX_NORMAL_DESC3_FD_INDEX 29
814#define TX_NORMAL_DESC3_FD_WIDTH 1
815#define TX_NORMAL_DESC3_FL_INDEX 0
816#define TX_NORMAL_DESC3_FL_WIDTH 15
817#define TX_NORMAL_DESC3_LD_INDEX 28
818#define TX_NORMAL_DESC3_LD_WIDTH 1
819#define TX_NORMAL_DESC3_OWN_INDEX 31
820#define TX_NORMAL_DESC3_OWN_WIDTH 1
821#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
822#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
823#define TX_NORMAL_DESC3_TCPPL_INDEX 0
824#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
825#define TX_NORMAL_DESC3_TSE_INDEX 18
826#define TX_NORMAL_DESC3_TSE_WIDTH 1
827
828#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
829
830/* MDIO undefined or vendor specific registers */
831#ifndef MDIO_AN_COMP_STAT
832#define MDIO_AN_COMP_STAT 0x0030
833#endif
834
835
836/* Bit setting and getting macros
837 * The get macro will extract the current bit field value from within
838 * the variable
839 *
840 * The set macro will clear the current bit field value within the
841 * variable and then set the bit field of the variable to the
842 * specified value
843 */
844#define GET_BITS(_var, _index, _width) \
845 (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
846
847#define SET_BITS(_var, _index, _width, _val) \
848do { \
849 (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
850 (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
851} while (0)
852
853#define GET_BITS_LE(_var, _index, _width) \
854 ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
855
856#define SET_BITS_LE(_var, _index, _width, _val) \
857do { \
858 (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
859 (_var) |= cpu_to_le32((((_val) & \
860 ((0x1 << (_width)) - 1)) << (_index))); \
861} while (0)
862
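A worked example (illustrative only) of the clear-then-set behavior, using the DMA_DSR0 Rx process state field defined earlier in this header:

static inline void example_bits(void)
{
	u32 status = 0x00001300;
	u32 rps = GET_BITS(status, DMA_DSR0_RPS_INDEX, DMA_DSR0_RPS_WIDTH);
	/* rps == 0x3: (0x1300 >> 8) & 0xf */

	SET_BITS(status, DMA_DSR0_RPS_INDEX, DMA_DSR0_RPS_WIDTH, 0x5);
	/* status == 0x00001500: the field is cleared, then 0x5 is
	 * masked to the field width and shifted into place
	 */
	(void)rps;
}
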
863
864/* Bit setting and getting macros based on register fields
865 * The get macro uses the bit field definitions formed using the input
866 * names to extract the current bit field value from within the
867 * variable
868 *
869 * The set macro uses the bit field definitions formed using the input
870 * names to set the bit field of the variable to the specified value
871 */
872#define XGMAC_GET_BITS(_var, _prefix, _field) \
873 GET_BITS((_var), \
874 _prefix##_##_field##_INDEX, \
875 _prefix##_##_field##_WIDTH)
876
877#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
878 SET_BITS((_var), \
879 _prefix##_##_field##_INDEX, \
880 _prefix##_##_field##_WIDTH, (_val))
881
882#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
883 GET_BITS_LE((_var), \
884 _prefix##_##_field##_INDEX, \
885 _prefix##_##_field##_WIDTH)
886
887#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
888 SET_BITS_LE((_var), \
889 _prefix##_##_field##_INDEX, \
890 _prefix##_##_field##_WIDTH, (_val))
891
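The prefix and field names are pasted together with the ## operator, so the caller never spells out the _INDEX/_WIDTH constants. Illustrative expansion, using a hypothetical MAC version register value:

/* XGMAC_GET_BITS(ver, MAC_VR, SNPSVER)
 * expands to
 * GET_BITS(ver, MAC_VR_SNPSVER_INDEX, MAC_VR_SNPSVER_WIDTH)
 * i.e. bits [7:0] of the value.
 */
u32 ver = 0x00003021;				/* hypothetical MAC_VR value */
unsigned int snpsver = XGMAC_GET_BITS(ver, MAC_VR, SNPSVER);	/* 0x21 */
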
892
893/* Macros for reading or writing registers
894 * The ioread macros will get bit fields or full values using the
895 * register definitions formed using the input names
896 *
897 * The iowrite macros will set bit fields or full values using the
898 * register definitions formed using the input names
899 */
900#define XGMAC_IOREAD(_pdata, _reg) \
901 ioread32((_pdata)->xgmac_regs + _reg)
902
903#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
904 GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
905 _reg##_##_field##_INDEX, \
906 _reg##_##_field##_WIDTH)
907
908#define XGMAC_IOWRITE(_pdata, _reg, _val) \
909 iowrite32((_val), (_pdata)->xgmac_regs + _reg)
910
911#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
912do { \
913 u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
914 SET_BITS(reg_val, \
915 _reg##_##_field##_INDEX, \
916 _reg##_##_field##_WIDTH, (_val)); \
917 XGMAC_IOWRITE((_pdata), _reg, reg_val); \
918} while (0)
919
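For instance, a read-modify-write of a single MAC bit field might look like the sketch below (the driver's actual configuration paths live in xgbe-dev.c; the JE field of MAC_RCR enables jumbo packets):

static inline void example_set_jumbo(struct xgbe_prv_data *pdata)
{
	/* Reads MAC_RCR, updates the MAC_RCR_JE field via SET_BITS,
	 * then writes the whole register back.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, 1);

	/* Reading the 1-bit field back yields 0 or 1 */
	(void)XGMAC_IOREAD_BITS(pdata, MAC_RCR, JE);
}
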
920
921/* Macros for reading or writing MTL queue or traffic class registers
922 * Similar to the standard read and write macros except that the
923 * base register value is calculated by the queue or traffic class number
924 */
925#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
926 ioread32((_pdata)->xgmac_regs + \
927 MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
928
929#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
930 GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
931 _reg##_##_field##_INDEX, \
932 _reg##_##_field##_WIDTH)
933
934#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
935 iowrite32((_val), (_pdata)->xgmac_regs + \
936 MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
937
938#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
939do { \
940 u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
941 SET_BITS(reg_val, \
942 _reg##_##_field##_INDEX, \
943 _reg##_##_field##_WIDTH, (_val)); \
944 XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
945} while (0)
946
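A sketch of per-queue use, where the macro folds MTL_Q_BASE + (q * MTL_Q_INC) into the register address for you:

static inline void example_mtl_tsf(struct xgbe_prv_data *pdata,
				   unsigned int q)
{
	/* Enable Tx store-and-forward mode on queue 'q' */
	XGMAC_MTL_IOWRITE_BITS(pdata, q, MTL_Q_TQOMR, TSF, MTL_TSF_ENABLE);
}
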
947
948/* Macros for reading or writing DMA channel registers
949 * Similar to the standard read and write macros except that the
950 * base register value is obtained from the ring
951 */
952#define XGMAC_DMA_IOREAD(_channel, _reg) \
953 ioread32((_channel)->dma_regs + _reg)
954
955#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
956 GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
957 _reg##_##_field##_INDEX, \
958 _reg##_##_field##_WIDTH)
959
960#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
961 iowrite32((_val), (_channel)->dma_regs + _reg)
962
963#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
964do { \
965 u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
966 SET_BITS(reg_val, \
967 _reg##_##_field##_INDEX, \
968 _reg##_##_field##_WIDTH, (_val)); \
969 XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
970} while (0)
971
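A sketch of per-channel use; because channel->dma_regs already points at the channel's register block, no channel arithmetic appears at the call site:

static inline void example_start_tx_dma(struct xgbe_channel *channel)
{
	/* Set the ST (start transmit) bit of the channel's Tx control
	 * register
	 */
	XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
}
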
972
973/* Macros for building, reading or writing register values or bits
974 * within the register values of XPCS registers.
975 */
976#define XPCS_IOWRITE(_pdata, _off, _val) \
977 iowrite32(_val, (_pdata)->xpcs_regs + (_off))
978
979#define XPCS_IOREAD(_pdata, _off) \
980 ioread32((_pdata)->xpcs_regs + (_off))
981
982
983/* Macros for building, reading or writing register values or bits
984 * using MDIO. Different from above because of the use of standardized
985 * Linux include values. No shifting is performed with the bit
986 * operations; everything works on mask values.
987 */
988#define XMDIO_READ(_pdata, _mmd, _reg) \
989 ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
990 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
991
992#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
993 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
994
995#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
996 ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
997 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
998
999#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
1000do { \
1001 u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
1002 mmd_val &= ~_mask; \
1003 mmd_val |= (_val); \
1004 XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
1005} while (0)
1006
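A sketch of the mask-based form, using the standard clause-45 definitions from <linux/mdio.h> (MDIO_MMD_AN, MDIO_STAT1, MDIO_STAT1_LSTATUS); note the mask is applied as-is, with no shifting:

static inline int example_an_link_up(struct xgbe_prv_data *pdata)
{
	/* Nonzero if the AN MMD reports link up */
	return !!XMDIO_READ_BITS(pdata, MDIO_MMD_AN, MDIO_STAT1,
				 MDIO_STAT1_LSTATUS);
}
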
1007#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 000000000000..6bb76d5c817b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,375 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/debugfs.h>
118#include <linux/module.h>
119#include <linux/slab.h>
120
121#include "xgbe.h"
122#include "xgbe-common.h"
123
124
125static ssize_t xgbe_common_read(char __user *buffer, size_t count,
126 loff_t *ppos, unsigned int value)
127{
128 char *buf;
129 ssize_t len;
130
131 if (*ppos != 0)
132 return 0;
133
134 buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
135 if (!buf)
136 return -ENOMEM;
137
138 if (count < strlen(buf)) {
139 kfree(buf);
140 return -ENOSPC;
141 }
142
143 len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
144 kfree(buf);
145
146 return len;
147}
148
149static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
150 loff_t *ppos, unsigned int *value)
151{
152 char workarea[32];
153 ssize_t len;
154 unsigned int scan_value;
155
156 if (*ppos != 0)
157 return 0;
158
159 if (count >= sizeof(workarea))
160 return -ENOSPC;
161
162 len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
163 buffer, count);
164 if (len < 0)
165 return len;
166
167 workarea[len] = '\0';
168 if (sscanf(workarea, "%x", &scan_value) == 1)
169 *value = scan_value;
170 else
171 return -EIO;
172
173 return len;
174}
175
176static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
177 size_t count, loff_t *ppos)
178{
179 struct xgbe_prv_data *pdata = filp->private_data;
180
181 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
182}
183
184static ssize_t xgmac_reg_addr_write(struct file *filp,
185 const char __user *buffer,
186 size_t count, loff_t *ppos)
187{
188 struct xgbe_prv_data *pdata = filp->private_data;
189
190 return xgbe_common_write(buffer, count, ppos,
191 &pdata->debugfs_xgmac_reg);
192}
193
194static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
195 size_t count, loff_t *ppos)
196{
197 struct xgbe_prv_data *pdata = filp->private_data;
198 unsigned int value;
199
200 value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
201
202 return xgbe_common_read(buffer, count, ppos, value);
203}
204
205static ssize_t xgmac_reg_value_write(struct file *filp,
206 const char __user *buffer,
207 size_t count, loff_t *ppos)
208{
209 struct xgbe_prv_data *pdata = filp->private_data;
210 unsigned int value;
211 ssize_t len;
212
213 len = xgbe_common_write(buffer, count, ppos, &value);
214 if (len < 0)
215 return len;
216
217 XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
218
219 return len;
220}
221
222static const struct file_operations xgmac_reg_addr_fops = {
223 .owner = THIS_MODULE,
224 .open = simple_open,
225 .read = xgmac_reg_addr_read,
226 .write = xgmac_reg_addr_write,
227};
228
229static const struct file_operations xgmac_reg_value_fops = {
230 .owner = THIS_MODULE,
231 .open = simple_open,
232 .read = xgmac_reg_value_read,
233 .write = xgmac_reg_value_write,
234};
235
236static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
237 size_t count, loff_t *ppos)
238{
239 struct xgbe_prv_data *pdata = filp->private_data;
240
241 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
242}
243
244static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
245 size_t count, loff_t *ppos)
246{
247 struct xgbe_prv_data *pdata = filp->private_data;
248
249 return xgbe_common_write(buffer, count, ppos,
250 &pdata->debugfs_xpcs_mmd);
251}
252
253static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
254 size_t count, loff_t *ppos)
255{
256 struct xgbe_prv_data *pdata = filp->private_data;
257
258 return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
259}
260
261static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
262 size_t count, loff_t *ppos)
263{
264 struct xgbe_prv_data *pdata = filp->private_data;
265
266 return xgbe_common_write(buffer, count, ppos,
267 &pdata->debugfs_xpcs_reg);
268}
269
270static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
271 size_t count, loff_t *ppos)
272{
273 struct xgbe_prv_data *pdata = filp->private_data;
274 unsigned int value;
275
276 value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
277 pdata->debugfs_xpcs_reg);
278
279 return xgbe_common_read(buffer, count, ppos, value);
280}
281
282static ssize_t xpcs_reg_value_write(struct file *filp,
283 const char __user *buffer,
284 size_t count, loff_t *ppos)
285{
286 struct xgbe_prv_data *pdata = filp->private_data;
287 unsigned int value;
288 ssize_t len;
289
290 len = xgbe_common_write(buffer, count, ppos, &value);
291 if (len < 0)
292 return len;
293
294 pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
295 pdata->debugfs_xpcs_reg, value);
296
297 return len;
298}
299
300static const struct file_operations xpcs_mmd_fops = {
301 .owner = THIS_MODULE,
302 .open = simple_open,
303 .read = xpcs_mmd_read,
304 .write = xpcs_mmd_write,
305};
306
307static const struct file_operations xpcs_reg_addr_fops = {
308 .owner = THIS_MODULE,
309 .open = simple_open,
310 .read = xpcs_reg_addr_read,
311 .write = xpcs_reg_addr_write,
312};
313
314static const struct file_operations xpcs_reg_value_fops = {
315 .owner = THIS_MODULE,
316 .open = simple_open,
317 .read = xpcs_reg_value_read,
318 .write = xpcs_reg_value_write,
319};
320
321void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
322{
323 struct dentry *pfile;
324 char *buf;
325
326 /* Set defaults */
327 pdata->debugfs_xgmac_reg = 0;
328 pdata->debugfs_xpcs_mmd = 1;
329 pdata->debugfs_xpcs_reg = 0;
330
331 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
332 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
333 if (pdata->xgbe_debugfs == NULL) {
334 netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
335 return;
336 }
337
338 pfile = debugfs_create_file("xgmac_register", 0600,
339 pdata->xgbe_debugfs, pdata,
340 &xgmac_reg_addr_fops);
341 if (!pfile)
342 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
343
344 pfile = debugfs_create_file("xgmac_register_value", 0600,
345 pdata->xgbe_debugfs, pdata,
346 &xgmac_reg_value_fops);
347 if (!pfile)
348 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
349
350 pfile = debugfs_create_file("xpcs_mmd", 0600,
351 pdata->xgbe_debugfs, pdata,
352 &xpcs_mmd_fops);
353 if (!pfile)
354 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
355
356 pfile = debugfs_create_file("xpcs_register", 0600,
357 pdata->xgbe_debugfs, pdata,
358 &xpcs_reg_addr_fops);
359 if (!pfile)
360 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
361
362 pfile = debugfs_create_file("xpcs_register_value", 0600,
363 pdata->xgbe_debugfs, pdata,
364 &xpcs_reg_value_fops);
365 if (!pfile)
366 netdev_err(pdata->netdev, "debugfs_create_file failed\n");
367
368 kfree(buf);
369}
370
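In practice these files pair up: write a hex register offset to xgmac_register (or to xpcs_mmd and xpcs_register), then read or write xgmac_register_value (or xpcs_register_value) to access the selected register. All values are parsed and printed as hex, one value per transaction, under the amd-xgbe-<ifname> debugfs directory created above.
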
371void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
372{
373 debugfs_remove_recursive(pdata->xgbe_debugfs);
374 pdata->xgbe_debugfs = NULL;
375}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 000000000000..6f1c85956d50
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,556 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include "xgbe.h"
118#include "xgbe-common.h"
119
120
121static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
122
123static void xgbe_free_ring(struct xgbe_prv_data *pdata,
124 struct xgbe_ring *ring)
125{
126 struct xgbe_ring_data *rdata;
127 unsigned int i;
128
129 if (!ring)
130 return;
131
132 if (ring->rdata) {
133 for (i = 0; i < ring->rdesc_count; i++) {
134 rdata = GET_DESC_DATA(ring, i);
135 xgbe_unmap_skb(pdata, rdata);
136 }
137
138 kfree(ring->rdata);
139 ring->rdata = NULL;
140 }
141
142 if (ring->rdesc) {
143 dma_free_coherent(pdata->dev,
144 (sizeof(struct xgbe_ring_desc) *
145 ring->rdesc_count),
146 ring->rdesc, ring->rdesc_dma);
147 ring->rdesc = NULL;
148 }
149}
150
151static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
152{
153 struct xgbe_channel *channel;
154 unsigned int i;
155
156 DBGPR("-->xgbe_free_ring_resources\n");
157
158 channel = pdata->channel;
159 for (i = 0; i < pdata->channel_count; i++, channel++) {
160 xgbe_free_ring(pdata, channel->tx_ring);
161 xgbe_free_ring(pdata, channel->rx_ring);
162 }
163
164 DBGPR("<--xgbe_free_ring_resources\n");
165}
166
167static int xgbe_init_ring(struct xgbe_prv_data *pdata,
168 struct xgbe_ring *ring, unsigned int rdesc_count)
169{
170 DBGPR("-->xgbe_init_ring\n");
171
172 if (!ring)
173 return 0;
174
175 /* Descriptors */
176 ring->rdesc_count = rdesc_count;
177 ring->rdesc = dma_alloc_coherent(pdata->dev,
178 (sizeof(struct xgbe_ring_desc) *
179 rdesc_count), &ring->rdesc_dma,
180 GFP_KERNEL);
181 if (!ring->rdesc)
182 return -ENOMEM;
183
184 /* Descriptor information */
185 ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
186 GFP_KERNEL);
187 if (!ring->rdata)
188 return -ENOMEM;
189
190 DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
191 ring->rdesc, ring->rdesc_dma, ring->rdata);
192
193 DBGPR("<--xgbe_init_ring\n");
194
195 return 0;
196}
197
198static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
199{
200 struct xgbe_channel *channel;
201 unsigned int i;
202 int ret;
203
204 DBGPR("-->xgbe_alloc_ring_resources\n");
205
206 channel = pdata->channel;
207 for (i = 0; i < pdata->channel_count; i++, channel++) {
208 DBGPR(" %s - tx_ring:\n", channel->name);
209 ret = xgbe_init_ring(pdata, channel->tx_ring,
210 pdata->tx_desc_count);
211 if (ret) {
212 netdev_alert(pdata->netdev,
213 "error initializing Tx ring\n");
214 goto err_ring;
215 }
216
217 DBGPR(" %s - rx_ring:\n", channel->name);
218 ret = xgbe_init_ring(pdata, channel->rx_ring,
219 pdata->rx_desc_count);
220 if (ret) {
221 netdev_alert(pdata->netdev,
222 "error initializing Rx ring\n");
223 goto err_ring;
224 }
225 }
226
227 DBGPR("<--xgbe_alloc_ring_resources\n");
228
229 return 0;
230
231err_ring:
232 xgbe_free_ring_resources(pdata);
233
234 return ret;
235}
236
237static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
238{
239 struct xgbe_hw_if *hw_if = &pdata->hw_if;
240 struct xgbe_channel *channel;
241 struct xgbe_ring *ring;
242 struct xgbe_ring_data *rdata;
243 struct xgbe_ring_desc *rdesc;
244 dma_addr_t rdesc_dma;
245 unsigned int i, j;
246
247 DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
248
249 channel = pdata->channel;
250 for (i = 0; i < pdata->channel_count; i++, channel++) {
251 ring = channel->tx_ring;
252 if (!ring)
253 break;
254
255 rdesc = ring->rdesc;
256 rdesc_dma = ring->rdesc_dma;
257
258 for (j = 0; j < ring->rdesc_count; j++) {
259 rdata = GET_DESC_DATA(ring, j);
260
261 rdata->rdesc = rdesc;
262 rdata->rdesc_dma = rdesc_dma;
263
264 rdesc++;
265 rdesc_dma += sizeof(struct xgbe_ring_desc);
266 }
267
268 ring->cur = 0;
269 ring->dirty = 0;
270 ring->tx.queue_stopped = 0;
271
272 hw_if->tx_desc_init(channel);
273 }
274
275 DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
276}
277
278static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
279{
280 struct xgbe_hw_if *hw_if = &pdata->hw_if;
281 struct xgbe_channel *channel;
282 struct xgbe_ring *ring;
283 struct xgbe_ring_desc *rdesc;
284 struct xgbe_ring_data *rdata;
285 dma_addr_t rdesc_dma, skb_dma;
286 struct sk_buff *skb = NULL;
287 unsigned int i, j;
288
289 DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
290
291 channel = pdata->channel;
292 for (i = 0; i < pdata->channel_count; i++, channel++) {
293 ring = channel->rx_ring;
294 if (!ring)
295 break;
296
297 rdesc = ring->rdesc;
298 rdesc_dma = ring->rdesc_dma;
299
300 for (j = 0; j < ring->rdesc_count; j++) {
301 rdata = GET_DESC_DATA(ring, j);
302
303 rdata->rdesc = rdesc;
304 rdata->rdesc_dma = rdesc_dma;
305
306 /* Allocate skb & assign to each rdesc */
307 skb = dev_alloc_skb(pdata->rx_buf_size);
308 if (skb == NULL)
309 break;
310 skb_dma = dma_map_single(pdata->dev, skb->data,
311 pdata->rx_buf_size,
312 DMA_FROM_DEVICE);
313 if (dma_mapping_error(pdata->dev, skb_dma)) {
314 netdev_alert(pdata->netdev,
315 "failed to do the dma map\n");
316 dev_kfree_skb_any(skb);
317 break;
318 }
319 rdata->skb = skb;
320 rdata->skb_dma = skb_dma;
321 rdata->skb_dma_len = pdata->rx_buf_size;
322
323 rdesc++;
324 rdesc_dma += sizeof(struct xgbe_ring_desc);
325 }
326
327 ring->cur = 0;
328 ring->dirty = 0;
329 ring->rx.realloc_index = 0;
330 ring->rx.realloc_threshold = 0;
331
332 hw_if->rx_desc_init(channel);
333 }
334
335 DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
336}
337
338static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
339 struct xgbe_ring_data *rdata)
340{
341 if (rdata->skb_dma) {
342 if (rdata->mapped_as_page) {
343 dma_unmap_page(pdata->dev, rdata->skb_dma,
344 rdata->skb_dma_len, DMA_TO_DEVICE);
345 } else {
346 dma_unmap_single(pdata->dev, rdata->skb_dma,
347 rdata->skb_dma_len, DMA_TO_DEVICE);
348 }
349 rdata->skb_dma = 0;
350 rdata->skb_dma_len = 0;
351 }
352
353 if (rdata->skb) {
354 dev_kfree_skb_any(rdata->skb);
355 rdata->skb = NULL;
356 }
357
358 rdata->tso_header = 0;
359 rdata->len = 0;
360 rdata->interrupt = 0;
361 rdata->mapped_as_page = 0;
362}
363
364static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
365{
366 struct xgbe_prv_data *pdata = channel->pdata;
367 struct xgbe_ring *ring = channel->tx_ring;
368 struct xgbe_ring_data *rdata;
369 struct xgbe_packet_data *packet;
370 struct skb_frag_struct *frag;
371 dma_addr_t skb_dma;
372 unsigned int start_index, cur_index;
373 unsigned int offset, tso, vlan, datalen, len;
374 unsigned int i;
375
376 DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
377
378 offset = 0;
379 start_index = ring->cur;
380 cur_index = ring->cur;
381
382 packet = &ring->packet_data;
383 packet->rdesc_count = 0;
384 packet->length = 0;
385
386 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
387 TSO_ENABLE);
388 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
389 VLAN_CTAG);
390
391 /* Save space for a context descriptor if needed */
392 if ((tso && (packet->mss != ring->tx.cur_mss)) ||
393 (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
394 cur_index++;
395 rdata = GET_DESC_DATA(ring, cur_index);
396
397 if (tso) {
398 DBGPR(" TSO packet\n");
399
400 /* Map the TSO header */
401 skb_dma = dma_map_single(pdata->dev, skb->data,
402 packet->header_len, DMA_TO_DEVICE);
403 if (dma_mapping_error(pdata->dev, skb_dma)) {
404 netdev_alert(pdata->netdev, "dma_map_single failed\n");
405 goto err_out;
406 }
407 rdata->skb_dma = skb_dma;
408 rdata->skb_dma_len = packet->header_len;
409 rdata->tso_header = 1;
410
411 offset = packet->header_len;
412
413 packet->length += packet->header_len;
414
415 cur_index++;
416 rdata = GET_DESC_DATA(ring, cur_index);
417 }
418
419 /* Map the (remainder of the) packet */
420 for (datalen = skb_headlen(skb) - offset; datalen; ) {
421 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
422
423 skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
424 DMA_TO_DEVICE);
425 if (dma_mapping_error(pdata->dev, skb_dma)) {
426 netdev_alert(pdata->netdev, "dma_map_single failed\n");
427 goto err_out;
428 }
429 rdata->skb_dma = skb_dma;
430 rdata->skb_dma_len = len;
431 DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
432 cur_index, skb_dma, len);
433
434 datalen -= len;
435 offset += len;
436
437 packet->length += len;
438
439 cur_index++;
440 rdata = GET_DESC_DATA(ring, cur_index);
441 }
442
443 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
444 DBGPR(" mapping frag %u\n", i);
445
446 frag = &skb_shinfo(skb)->frags[i];
447 offset = 0;
448
449 for (datalen = skb_frag_size(frag); datalen; ) {
450 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
451
452 skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
453 len, DMA_TO_DEVICE);
454 if (dma_mapping_error(pdata->dev, skb_dma)) {
455 netdev_alert(pdata->netdev,
456 "skb_frag_dma_map failed\n");
457 goto err_out;
458 }
459 rdata->skb_dma = skb_dma;
460 rdata->skb_dma_len = len;
461 rdata->mapped_as_page = 1;
462 DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
463 cur_index, skb_dma, len);
464
465 datalen -= len;
466 offset += len;
467
468 packet->length += len;
469
470 cur_index++;
471 rdata = GET_DESC_DATA(ring, cur_index);
472 }
473 }
474
475 /* Save the skb address in the last entry */
476 rdata->skb = skb;
477
478 /* Save the number of descriptor entries used */
479 packet->rdesc_count = cur_index - start_index;
480
481 DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
482
483 return packet->rdesc_count;
484
485err_out:
486 while (start_index < cur_index) {
487 rdata = GET_DESC_DATA(ring, start_index++);
488 xgbe_unmap_skb(pdata, rdata);
489 }
490
491 DBGPR("<--xgbe_map_tx_skb: count=0\n");
492
493 return 0;
494}
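
/* Editor's note -- an illustrative sketch, not part of the driver: the
 * mapping loops above split a buffer into descriptor-sized chunks of at
 * most TX_MAX_BUF_SIZE bytes. The standalone program below walks the same
 * pattern with assumed values (a 16383-byte limit, a 40000-byte buffer)
 * to show how the descriptor count falls out.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_buf = 16383;	/* assumed TX_MAX_BUF_SIZE */
	unsigned int datalen = 40000;	/* assumed buffer length */
	unsigned int offset = 0, descs = 0, len;

	while (datalen) {
		len = datalen < max_buf ? datalen : max_buf;

		/* the driver would dma_map_single(data + offset, len) here */
		datalen -= len;
		offset += len;
		descs++;
	}

	/* 40000 bytes -> chunks of 16383 + 16383 + 7234 -> 3 descriptors */
	printf("descriptors used = %u\n", descs);
	return 0;
}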
495
496static void xgbe_realloc_skb(struct xgbe_channel *channel)
497{
498 struct xgbe_prv_data *pdata = channel->pdata;
499 struct xgbe_hw_if *hw_if = &pdata->hw_if;
500 struct xgbe_ring *ring = channel->rx_ring;
501 struct xgbe_ring_data *rdata;
502 struct sk_buff *skb = NULL;
503 dma_addr_t skb_dma;
504 int i;
505
506 DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
507 ring->rx.realloc_index);
508
509 for (i = 0; i < ring->dirty; i++) {
510 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
511
512 /* Reset rdata values */
513 xgbe_unmap_skb(pdata, rdata);
514
515 /* Allocate skb & assign to each rdesc */
516 skb = dev_alloc_skb(pdata->rx_buf_size);
517 if (skb == NULL) {
518 netdev_alert(pdata->netdev,
519 "failed to allocate skb\n");
520 break;
521 }
522 skb_dma = dma_map_single(pdata->dev, skb->data,
523 pdata->rx_buf_size, DMA_FROM_DEVICE);
524 if (dma_mapping_error(pdata->dev, skb_dma)) {
525 netdev_alert(pdata->netdev,
526 "failed to do the dma map\n");
527 dev_kfree_skb_any(skb);
528 break;
529 }
530 rdata->skb = skb;
531 rdata->skb_dma = skb_dma;
532 rdata->skb_dma_len = pdata->rx_buf_size;
533
534 hw_if->rx_desc_reset(rdata);
535
536 ring->rx.realloc_index++;
537 }
538 ring->dirty = 0;
539
540 DBGPR("<--xgbe_realloc_skb\n");
541}
542
543void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
544{
545 DBGPR("-->xgbe_init_function_ptrs_desc\n");
546
547 desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
548 desc_if->free_ring_resources = xgbe_free_ring_resources;
549 desc_if->map_tx_skb = xgbe_map_tx_skb;
550 desc_if->realloc_skb = xgbe_realloc_skb;
551 desc_if->unmap_skb = xgbe_unmap_skb;
552 desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
553 desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
554
555 DBGPR("<--xgbe_init_function_ptrs_desc\n");
556}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 000000000000..002293b0819d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,2182 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/phy.h>
118#include <linux/clk.h>
119
120#include "xgbe.h"
121#include "xgbe-common.h"
122
123
124static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
125 unsigned int usec)
126{
127 unsigned long rate;
128 unsigned int ret;
129
130 DBGPR("-->xgbe_usec_to_riwt\n");
131
132 rate = clk_get_rate(pdata->sysclock);
133
134 /*
135 * Convert the input usec value to the watchdog timer value. Each
136 * watchdog timer value is equivalent to 256 clock cycles.
137 * Calculate the required value as:
138 * ( usec * ( system_clock_hz / 10^6 ) ) / 256
139 */
140 ret = (usec * (rate / 1000000)) / 256;
141
142 DBGPR("<--xgbe_usec_to_riwt\n");
143
144 return ret;
145}
146
147static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
148 unsigned int riwt)
149{
150 unsigned long rate;
151 unsigned int ret;
152
153 DBGPR("-->xgbe_riwt_to_usec\n");
154
155 rate = clk_get_rate(pdata->sysclock);
156
157 /*
158 * Convert the input watchdog timer value to the usec value. Each
159 * watchdog timer value is equivalent to 256 clock cycles.
160 * Calculate the required value as:
161 * ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
162 */
163 ret = (riwt * 256) / (rate / 1000000);
164
165 DBGPR("<--xgbe_riwt_to_usec\n");
166
167 return ret;
168}
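
/* Editor's note -- an illustrative sketch, not part of the driver: the two
 * conversion helpers above in plain C, worked with an assumed 125 MHz DMA
 * system clock. Because both directions use integer division, a round trip
 * can lose precision (100 usec maps to RIWT 48, which maps back to 98 usec).
 */
#include <stdio.h>

static unsigned int usec_to_riwt(unsigned long rate_hz, unsigned int usec)
{
	return (usec * (rate_hz / 1000000)) / 256;
}

static unsigned int riwt_to_usec(unsigned long rate_hz, unsigned int riwt)
{
	return (riwt * 256) / (rate_hz / 1000000);
}

int main(void)
{
	unsigned long rate = 125000000UL;	/* assumed clock rate in Hz */
	unsigned int riwt = usec_to_riwt(rate, 100);

	/* prints "riwt=48 usec=98" */
	printf("riwt=%u usec=%u\n", riwt, riwt_to_usec(rate, riwt));
	return 0;
}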
169
170static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
171{
172 struct xgbe_channel *channel;
173 unsigned int i;
174
175 channel = pdata->channel;
176 for (i = 0; i < pdata->channel_count; i++, channel++)
177 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
178 pdata->pblx8);
179
180 return 0;
181}
182
183static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
184{
185 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
186}
187
188static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
189{
190 struct xgbe_channel *channel;
191 unsigned int i;
192
193 channel = pdata->channel;
194 for (i = 0; i < pdata->channel_count; i++, channel++) {
195 if (!channel->tx_ring)
196 break;
197
198 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
199 pdata->tx_pbl);
200 }
201
202 return 0;
203}
204
205static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
206{
207 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
208}
209
210static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
211{
212 struct xgbe_channel *channel;
213 unsigned int i;
214
215 channel = pdata->channel;
216 for (i = 0; i < pdata->channel_count; i++, channel++) {
217 if (!channel->rx_ring)
218 break;
219
220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
221 pdata->rx_pbl);
222 }
223
224 return 0;
225}
226
227static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
228{
229 struct xgbe_channel *channel;
230 unsigned int i;
231
232 channel = pdata->channel;
233 for (i = 0; i < pdata->channel_count; i++, channel++) {
234 if (!channel->tx_ring)
235 break;
236
237 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
238 pdata->tx_osp_mode);
239 }
240
241 return 0;
242}
243
244static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
245{
246 unsigned int i;
247
248 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
249 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
250
251 return 0;
252}
253
254static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
255{
256 unsigned int i;
257
258 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
259 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
260
261 return 0;
262}
263
264static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
265 unsigned int val)
266{
267 unsigned int i;
268
269 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
270 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
271
272 return 0;
273}
274
275static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
276 unsigned int val)
277{
278 unsigned int i;
279
280 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
281 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
282
283 return 0;
284}
285
286static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
287{
288 struct xgbe_channel *channel;
289 unsigned int i;
290
291 channel = pdata->channel;
292 for (i = 0; i < pdata->channel_count; i++, channel++) {
293 if (!channel->rx_ring)
294 break;
295
296 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
297 pdata->rx_riwt);
298 }
299
300 return 0;
301}
302
303static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
304{
305 return 0;
306}
307
308static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
309{
310 struct xgbe_channel *channel;
311 unsigned int i;
312
313 channel = pdata->channel;
314 for (i = 0; i < pdata->channel_count; i++, channel++) {
315 if (!channel->rx_ring)
316 break;
317
318 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
319 pdata->rx_buf_size);
320 }
321}
322
323static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
324{
325 struct xgbe_channel *channel;
326 unsigned int i;
327
328 channel = pdata->channel;
329 for (i = 0; i < pdata->channel_count; i++, channel++) {
330 if (!channel->tx_ring)
331 break;
332
333 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
334 }
335}
336
337static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
338{
339 unsigned int max_q_count, q_count;
340 unsigned int reg, reg_val;
341 unsigned int i;
342
343 /* Clear MTL flow control */
344 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
345 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
346
347 /* Clear MAC flow control */
348 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
349 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
350 reg = MAC_Q0TFCR;
351 for (i = 0; i < q_count; i++) {
352 reg_val = XGMAC_IOREAD(pdata, reg);
353 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
354 XGMAC_IOWRITE(pdata, reg, reg_val);
355
356 reg += MAC_QTFCR_INC;
357 }
358
359 return 0;
360}
361
362static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
363{
364 unsigned int max_q_count, q_count;
365 unsigned int reg, reg_val;
366 unsigned int i;
367
368 /* Set MTL flow control */
369 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
370 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
371
372 /* Set MAC flow control */
373 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
374 q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
375 reg = MAC_Q0TFCR;
376 for (i = 0; i < q_count; i++) {
377 reg_val = XGMAC_IOREAD(pdata, reg);
378
379 /* Enable transmit flow control */
380 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
381 /* Set pause time */
382 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
383
384 XGMAC_IOWRITE(pdata, reg, reg_val);
385
386 reg += MAC_QTFCR_INC;
387 }
388
389 return 0;
390}
391
392static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
393{
394 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
395
396 return 0;
397}
398
399static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
400{
401 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
402
403 return 0;
404}
405
406static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
407{
408 if (pdata->tx_pause)
409 xgbe_enable_tx_flow_control(pdata);
410 else
411 xgbe_disable_tx_flow_control(pdata);
412
413 return 0;
414}
415
416static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
417{
418 if (pdata->rx_pause)
419 xgbe_enable_rx_flow_control(pdata);
420 else
421 xgbe_disable_rx_flow_control(pdata);
422
423 return 0;
424}
425
426static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
427{
428 xgbe_config_tx_flow_control(pdata);
429 xgbe_config_rx_flow_control(pdata);
430}
431
432static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
433{
434 struct xgbe_channel *channel;
435 unsigned int dma_ch_isr, dma_ch_ier;
436 unsigned int i;
437
438 channel = pdata->channel;
439 for (i = 0; i < pdata->channel_count; i++, channel++) {
440 /* Clear all the interrupts which are set */
441 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
442 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
443
444 /* Clear all interrupt enable bits */
445 dma_ch_ier = 0;
446
447 /* Enable the following interrupts
448 * NIE - Normal Interrupt Summary Enable
449 * AIE - Abnormal Interrupt Summary Enable
450 * FBEE - Fatal Bus Error Enable
451 */
452 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
453 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
454 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
455
456 if (channel->tx_ring) {
457 /* Enable the following Tx interrupts
458 * TIE - Transmit Interrupt Enable (unless polling)
459 */
460 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
461 }
462 if (channel->rx_ring) {
463 /* Enable the following Rx interrupts
464 * RBUE - Receive Buffer Unavailable Enable
465 * RIE - Receive Interrupt Enable
466 */
467 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
468 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
469 }
470
471 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
472 }
473}
474
475static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
476{
477 unsigned int mtl_q_isr;
478 unsigned int q_count, i;
479
480 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
481 for (i = 0; i < q_count; i++) {
482 /* Clear all the interrupts which are set */
483 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
484 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
485
486 /* No MTL interrupts to be enabled */
487 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
488 }
489}
490
491static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
492{
493 /* No MAC interrupts to be enabled */
494 XGMAC_IOWRITE(pdata, MAC_IER, 0);
495
496 /* Enable all counter interrupts */
497 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
498 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
499}
500
501static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
502{
503 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
504
505 return 0;
506}
507
508static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
509{
510 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
511
512 return 0;
513}
514
515static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
516{
517 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
518
519 return 0;
520}
521
522static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
523 unsigned int enable)
524{
525 unsigned int val = enable ? 1 : 0;
526
527 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
528 return 0;
529
530 DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
531 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
532
533 return 0;
534}
535
536static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
537 unsigned int enable)
538{
539 unsigned int val = enable ? 1 : 0;
540
541 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
542 return 0;
543
544 DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
545 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
546
547 return 0;
548}
549
550static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
551 unsigned int am_mode)
552{
553 struct netdev_hw_addr *ha;
554 unsigned int mac_reg;
555 unsigned int mac_addr_hi, mac_addr_lo;
556 u8 *mac_addr;
557 unsigned int i;
558
559 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
560 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
561
562 i = 0;
563 mac_reg = MAC_MACA1HR;
564
565 netdev_for_each_uc_addr(ha, pdata->netdev) {
566 mac_addr_lo = 0;
567 mac_addr_hi = 0;
568 mac_addr = (u8 *)&mac_addr_lo;
569 mac_addr[0] = ha->addr[0];
570 mac_addr[1] = ha->addr[1];
571 mac_addr[2] = ha->addr[2];
572 mac_addr[3] = ha->addr[3];
573 mac_addr = (u8 *)&mac_addr_hi;
574 mac_addr[0] = ha->addr[4];
575 mac_addr[1] = ha->addr[5];
576
577 DBGPR(" adding unicast address %pM at 0x%04x\n",
578 ha->addr, mac_reg);
579
580 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
581
582 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
583 mac_reg += MAC_MACA_INC;
584 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
585 mac_reg += MAC_MACA_INC;
586
587 i++;
588 }
589
590 if (!am_mode) {
591 netdev_for_each_mc_addr(ha, pdata->netdev) {
592 mac_addr_lo = 0;
593 mac_addr_hi = 0;
594 mac_addr = (u8 *)&mac_addr_lo;
595 mac_addr[0] = ha->addr[0];
596 mac_addr[1] = ha->addr[1];
597 mac_addr[2] = ha->addr[2];
598 mac_addr[3] = ha->addr[3];
599 mac_addr = (u8 *)&mac_addr_hi;
600 mac_addr[0] = ha->addr[4];
601 mac_addr[1] = ha->addr[5];
602
603 DBGPR(" adding multicast address %pM at 0x%04x\n",
604 ha->addr, mac_reg);
605
606 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
607
608 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
609 mac_reg += MAC_MACA_INC;
610 XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
611 mac_reg += MAC_MACA_INC;
612
613 i++;
614 }
615 }
616
617 /* Clear remaining additional MAC address entries */
618 for (; i < pdata->hw_feat.addn_mac; i++) {
619 XGMAC_IOWRITE(pdata, mac_reg, 0);
620 mac_reg += MAC_MACA_INC;
621 XGMAC_IOWRITE(pdata, mac_reg, 0);
622 mac_reg += MAC_MACA_INC;
623 }
624
625 return 0;
626}
627
628static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
629{
630 unsigned int mac_addr_hi, mac_addr_lo;
631
632 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
633 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
634 (addr[1] << 8) | (addr[0] << 0);
635
636 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
637 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
638
639 return 0;
640}
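
/* Editor's note -- an illustrative sketch, not part of the driver: the
 * byte packing used by xgbe_set_mac_address() above, shown standalone for
 * the made-up address 02:11:22:33:44:55. The low register carries bytes
 * 0-3 and the high register bytes 4-5, both little-endian within the word.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t hi, lo;

	hi = (addr[5] << 8) | (addr[4] << 0);
	lo = (addr[3] << 24) | (addr[2] << 16) |
	     (addr[1] << 8) | (addr[0] << 0);

	/* prints "hi=0x00005544 lo=0x33221102" */
	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}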
641
642static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
643 int mmd_reg)
644{
645 unsigned int mmd_address;
646 int mmd_data;
647
648 if (mmd_reg & MII_ADDR_C45)
649 mmd_address = mmd_reg & ~MII_ADDR_C45;
650 else
651 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
652
653 /* The PCS registers are accessed using mmio. The underlying APB3
654 * management interface uses indirect addressing to access the MMD
655 * register sets. This requires accessing the PCS registers in two
656 * phases, an address phase and a data phase.
657 *
658 * The mmio interface is based on 32-bit offsets and values. All
659 * register offsets must therefore be adjusted by left shifting the
660 * offset 2 bits and reading 32 bits of data.
661 */
662 mutex_lock(&pdata->xpcs_mutex);
663 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
664 mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
665 mutex_unlock(&pdata->xpcs_mutex);
666
667 return mmd_data;
668}
669
670static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
671 int mmd_reg, int mmd_data)
672{
673 unsigned int mmd_address;
674
675 if (mmd_reg & MII_ADDR_C45)
676 mmd_address = mmd_reg & ~MII_ADDR_C45;
677 else
678 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
679
680 /* The PCS registers are accessed using mmio. The underlying APB3
681 * management interface uses indirect addressing to access the MMD
682 * register sets. This requires accessing the PCS registers in two
683 * phases, an address phase and a data phase.
684 *
685 * The mmio interface is based on 32-bit offsets and values. All
686 * register offsets must therefore be adjusted by left shifting the
687 * offset 2 bits and writing 32 bits of data.
688 */
689 mutex_lock(&pdata->xpcs_mutex);
690 XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
691 XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
692 mutex_unlock(&pdata->xpcs_mutex);
693}
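
/* Editor's note -- an illustrative sketch, not part of the driver: how the
 * indirect PCS access above splits an MMD register address into the value
 * written during the address phase and the 32-bit mmio offset used during
 * the data phase. MMD 1, register 0x0007 is an assumed example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mmd = 1, reg = 0x0007;
	unsigned int mmd_address = (mmd << 16) | (reg & 0xffff);

	/* value written at offset PCS_MMD_SELECT << 2; prints 0x100 */
	printf("select window = 0x%x\n", mmd_address >> 8);

	/* offset used for the actual read or write; prints 0x1c */
	printf("data offset   = 0x%x\n", (mmd_address & 0xff) << 2);
	return 0;
}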
694
695static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
696{
697 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
698}
699
700static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
701{
702 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
703
704 return 0;
705}
706
707static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
708{
709 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
710
711 return 0;
712}
713
714static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
715{
716 /* Put the VLAN tag in the Rx descriptor */
717 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
718
719 /* Don't check the VLAN type */
720 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
721
722 /* Check only C-TAG (0x8100) packets */
723 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
724
725 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
726 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
727
728 /* Enable VLAN tag stripping */
729 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
730
731 return 0;
732}
733
734static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
735{
736 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
737
738 return 0;
739}
740
741static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
742{
743 struct xgbe_ring_desc *rdesc = rdata->rdesc;
744
745 /* Reset the Tx descriptor
746 * Set buffer 1 (lo) address to zero
747 * Set buffer 1 (hi) address to zero
748 * Reset all other control bits (IC, TTSE, B2L & B1L)
749 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
750 */
751 rdesc->desc0 = 0;
752 rdesc->desc1 = 0;
753 rdesc->desc2 = 0;
754 rdesc->desc3 = 0;
755}
756
757static void xgbe_tx_desc_init(struct xgbe_channel *channel)
758{
759 struct xgbe_ring *ring = channel->tx_ring;
760 struct xgbe_ring_data *rdata;
761 struct xgbe_ring_desc *rdesc;
762 int i;
763 int start_index = ring->cur;
764
765 DBGPR("-->tx_desc_init\n");
766
767 /* Initialize all descriptors */
768 for (i = 0; i < ring->rdesc_count; i++) {
769 rdata = GET_DESC_DATA(ring, i);
770 rdesc = rdata->rdesc;
771
772 /* Initialize Tx descriptor
773 * Set buffer 1 (lo) address to zero
774 * Set buffer 1 (hi) address to zero
775 * Reset all other control bits (IC, TTSE, B2L & B1L)
776 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
777 * etc)
778 */
779 rdesc->desc0 = 0;
780 rdesc->desc1 = 0;
781 rdesc->desc2 = 0;
782 rdesc->desc3 = 0;
783 }
784
785 /* Make sure everything is written to the descriptor(s) before
786 * telling the device about them
787 */
788 wmb();
789
790 /* Update the total number of Tx descriptors */
791 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
792
793 /* Update the starting address of descriptor ring */
794 rdata = GET_DESC_DATA(ring, start_index);
795 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
796 upper_32_bits(rdata->rdesc_dma));
797 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
798 lower_32_bits(rdata->rdesc_dma));
799
800 DBGPR("<--tx_desc_init\n");
801}
802
803static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
804{
805 struct xgbe_ring_desc *rdesc = rdata->rdesc;
806
807 /* Reset the Rx descriptor
808 * Set buffer 1 (lo) address to dma address (lo)
809 * Set buffer 1 (hi) address to dma address (hi)
810 * Set buffer 2 (lo) address to zero
811 * Set buffer 2 (hi) address to zero and set control bits
812 * OWN and INTE
813 */
814 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
815 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
816 rdesc->desc2 = 0;
817
818 rdesc->desc3 = 0;
819 if (rdata->interrupt)
820 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
821
822 /* Since the Rx DMA engine is likely running, make sure everything
823 * is written to the descriptor(s) before setting the OWN bit
824 * for the descriptor
825 */
826 wmb();
827
828 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
829
830 /* Make sure ownership is written to the descriptor */
831 wmb();
832}
833
834static void xgbe_rx_desc_init(struct xgbe_channel *channel)
835{
836 struct xgbe_prv_data *pdata = channel->pdata;
837 struct xgbe_ring *ring = channel->rx_ring;
838 struct xgbe_ring_data *rdata;
839 struct xgbe_ring_desc *rdesc;
840 unsigned int start_index = ring->cur;
841 unsigned int rx_coalesce, rx_frames;
842 unsigned int i;
843
844 DBGPR("-->rx_desc_init\n");
845
846 rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
847 rx_frames = pdata->rx_frames;
848
849 /* Initialize all descriptors */
850 for (i = 0; i < ring->rdesc_count; i++) {
851 rdata = GET_DESC_DATA(ring, i);
852 rdesc = rdata->rdesc;
853
854 /* Initialize Rx descriptor
855 * Set buffer 1 (lo) address to dma address (lo)
856 * Set buffer 1 (hi) address to dma address (hi)
857 * Set buffer 2 (lo) address to zero
858 * Set buffer 2 (hi) address to zero and set control
859 * bits OWN and INTE appropriately
860 */
861 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
862 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
863 rdesc->desc2 = 0;
864 rdesc->desc3 = 0;
865 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
866 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
867 rdata->interrupt = 1;
868 if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
869 /* Clear interrupt on completion bit */
870 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
871 0);
872 rdata->interrupt = 0;
873 }
874 }
875
876 /* Make sure everything is written to the descriptors before
877 * telling the device about them
878 */
879 wmb();
880
881 /* Update the total number of Rx descriptors */
882 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
883
884 /* Update the starting address of descriptor ring */
885 rdata = GET_DESC_DATA(ring, start_index);
886 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
887 upper_32_bits(rdata->rdesc_dma));
888 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
889 lower_32_bits(rdata->rdesc_dma));
890
891 /* Update the Rx Descriptor Tail Pointer */
892 rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
893 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
894 lower_32_bits(rdata->rdesc_dma));
895
896 DBGPR("<--rx_desc_init\n");
897}
898
899static void xgbe_pre_xmit(struct xgbe_channel *channel)
900{
901 struct xgbe_prv_data *pdata = channel->pdata;
902 struct xgbe_ring *ring = channel->tx_ring;
903 struct xgbe_ring_data *rdata;
904 struct xgbe_ring_desc *rdesc;
905 struct xgbe_packet_data *packet = &ring->packet_data;
906 unsigned int csum, tso, vlan;
907 unsigned int tso_context, vlan_context;
908 unsigned int tx_coalesce, tx_frames;
909 int start_index = ring->cur;
910 int i;
911
912 DBGPR("-->xgbe_pre_xmit\n");
913
914 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
915 CSUM_ENABLE);
916 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
917 TSO_ENABLE);
918 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
919 VLAN_CTAG);
920
921 if (tso && (packet->mss != ring->tx.cur_mss))
922 tso_context = 1;
923 else
924 tso_context = 0;
925
926 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
927 vlan_context = 1;
928 else
929 vlan_context = 0;
930
931 tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
932 tx_frames = pdata->tx_frames;
933 if (tx_coalesce && !channel->tx_timer_active)
934 ring->coalesce_count = 0;
935
936 rdata = GET_DESC_DATA(ring, ring->cur);
937 rdesc = rdata->rdesc;
938
939 /* Create a context descriptor if this is a TSO packet */
940 if (tso_context || vlan_context) {
941 if (tso_context) {
942 DBGPR(" TSO context descriptor, mss=%u\n",
943 packet->mss);
944
945 /* Set the MSS size */
946 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
947 MSS, packet->mss);
948
949 /* Mark it as a CONTEXT descriptor */
950 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
951 CTXT, 1);
952
953 /* Indicate this descriptor contains the MSS */
954 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
955 TCMSSV, 1);
956
957 ring->tx.cur_mss = packet->mss;
958 }
959
960 if (vlan_context) {
961 DBGPR(" VLAN context descriptor, ctag=%u\n",
962 packet->vlan_ctag);
963
964 /* Mark it as a CONTEXT descriptor */
965 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
966 CTXT, 1);
967
968 /* Set the VLAN tag */
969 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
970 VT, packet->vlan_ctag);
971
972 /* Indicate this descriptor contains the VLAN tag */
973 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
974 VLTV, 1);
975
976 ring->tx.cur_vlan_ctag = packet->vlan_ctag;
977 }
978
979 ring->cur++;
980 rdata = GET_DESC_DATA(ring, ring->cur);
981 rdesc = rdata->rdesc;
982 }
983
984 /* Update buffer address (for TSO this is the header) */
985 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
986 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
987
988 /* Update the buffer length */
989 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
990 rdata->skb_dma_len);
991
992 /* VLAN tag insertion check */
993 if (vlan)
994 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
995 TX_NORMAL_DESC2_VLAN_INSERT);
996
997 /* Set IC bit based on Tx coalescing settings */
998 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
999 if (tx_coalesce && (!tx_frames ||
1000 (++ring->coalesce_count % tx_frames)))
1001 /* Clear IC bit */
1002 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
1003
1004 /* Mark it as First Descriptor */
1005 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
1006
1007 /* Mark it as a NORMAL descriptor */
1008 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1009
1010 /* Set OWN bit if not the first descriptor */
1011 if (ring->cur != start_index)
1012 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1013
1014 if (tso) {
1015 /* Enable TSO */
1016 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
1017 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
1018 packet->tcp_payload_len);
1019 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1020 packet->tcp_header_len / 4);
1021 } else {
1022 /* Enable CRC and Pad Insertion */
1023 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1024
1025 /* Enable HW CSUM */
1026 if (csum)
1027 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1028 CIC, 0x3);
1029
1030 /* Set the total length to be transmitted */
1031 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1032 packet->length);
1033 }
1034
1035 for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
1036 ring->cur++;
1037 rdata = GET_DESC_DATA(ring, ring->cur);
1038 rdesc = rdata->rdesc;
1039
1040 /* Update buffer address */
1041 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1042 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1043
1044 /* Update the buffer length */
1045 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1046 rdata->skb_dma_len);
1047
1048 /* Set IC bit based on Tx coalescing settings */
1049 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1050 if (tx_coalesce && (!tx_frames ||
1051 (++ring->coalesce_count % tx_frames)))
1052 /* Clear IC bit */
1053 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
1054
1055 /* Set OWN bit */
1056 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1057
1058 /* Mark it as NORMAL descriptor */
1059 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1060
1061 /* Enable HW CSUM */
1062 if (csum)
1063 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1064 CIC, 0x3);
1065 }
1066
1067 /* Set LAST bit for the last descriptor */
1068 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1069
1070 /* In case the Tx DMA engine is running, make sure everything
1071 * is written to the descriptor(s) before setting the OWN bit
1072 * for the first descriptor
1073 */
1074 wmb();
1075
1076 /* Set OWN bit for the first descriptor */
1077 rdata = GET_DESC_DATA(ring, start_index);
1078 rdesc = rdata->rdesc;
1079 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1080
1081#ifdef XGMAC_ENABLE_TX_DESC_DUMP
1082 xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
1083#endif
1084
1085 /* Make sure ownership is written to the descriptor */
1086 wmb();
1087
1088 /* Issue a poll command to Tx DMA by writing the address
1089 * of the next free descriptor */
1090 ring->cur++;
1091 rdata = GET_DESC_DATA(ring, ring->cur);
1092 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
1093 lower_32_bits(rdata->rdesc_dma));
1094
1095 /* Start the Tx coalescing timer */
1096 if (tx_coalesce && !channel->tx_timer_active) {
1097 channel->tx_timer_active = 1;
1098 hrtimer_start(&channel->tx_timer,
1099 ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
1100 HRTIMER_MODE_REL);
1101 }
1102
1103 DBGPR(" %s: descriptors %u to %u written\n",
1104 channel->name, start_index & (ring->rdesc_count - 1),
1105 (ring->cur - 1) & (ring->rdesc_count - 1));
1106
1107 DBGPR("<--xgbe_pre_xmit\n");
1108}
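
/* Editor's note -- an illustrative sketch, not part of the driver: the IC
 * (interrupt on completion) test that xgbe_pre_xmit() applies to every
 * descriptor. With an assumed tx_frames of 16, the IC bit survives only
 * when the running coalesce count wraps, so a completion interrupt fires
 * once per 16 descriptors rather than once per packet.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tx_frames = 16;	/* assumed coalescing setting */
	unsigned int coalesce_count = 0;
	unsigned int i, ic;

	for (i = 0; i < 48; i++) {
		ic = 1;
		if (!tx_frames || (++coalesce_count % tx_frames))
			ic = 0;	/* cleared: no interrupt for this one */
		if (ic)
			printf("descriptor %u raises an interrupt\n", i);
	}
	return 0;	/* prints descriptors 15, 31 and 47 */
}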
1109
1110static int xgbe_dev_read(struct xgbe_channel *channel)
1111{
1112 struct xgbe_ring *ring = channel->rx_ring;
1113 struct xgbe_ring_data *rdata;
1114 struct xgbe_ring_desc *rdesc;
1115 struct xgbe_packet_data *packet = &ring->packet_data;
1116 unsigned int err, etlt;
1117
1118 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1119
1120 rdata = GET_DESC_DATA(ring, ring->cur);
1121 rdesc = rdata->rdesc;
1122
1123 /* Check for data availability */
1124 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1125 return 1;
1126
1127#ifdef XGMAC_ENABLE_RX_DESC_DUMP
1128 xgbe_dump_rx_desc(ring, rdesc, ring->cur);
1129#endif
1130
1131 /* Get the packet length */
1132 rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1133
1134 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
1135 /* Not all the data has been transferred for this packet */
1136 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1137 INCOMPLETE, 1);
1138 return 0;
1139 }
1140
1141 /* This is the last of the data for this packet */
1142 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1143 INCOMPLETE, 0);
1144
1145 /* Set checksum done indicator as appropriate */
1146 if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
1147 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1148 CSUM_DONE, 1);
1149
1150 /* Check for errors (only valid in last descriptor) */
1151 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1152 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1153 DBGPR(" err=%u, etlt=%#x\n", err, etlt);
1154
1155 if (!err || (err && !etlt)) {
1156 if (etlt == 0x09) {
1157 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1158 VLAN_CTAG, 1);
1159 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1160 RX_NORMAL_DESC0,
1161 OVT);
1162 DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
1163 }
1164 } else {
1165 if ((etlt == 0x05) || (etlt == 0x06))
1166 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1167 CSUM_DONE, 0);
1168 else
1169 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1170 FRAME, 1);
1171 }
1172
1173 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1174 ring->cur & (ring->rdesc_count - 1), ring->cur);
1175
1176 return 0;
1177}
1178
1179static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1180{
1181 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1182 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1183}
1184
1185static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1186{
1187 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1188 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1189}
1190
1191static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
1192 enum xgbe_int_state int_state)
1193{
1194 unsigned int dma_ch_ier;
1195
1196 if (int_state == XGMAC_INT_STATE_SAVE) {
1197 channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1198 channel->saved_ier &= DMA_INTERRUPT_MASK;
1199 } else {
1200 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1201 dma_ch_ier |= channel->saved_ier;
1202 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1203 }
1204}
1205
1206static int xgbe_enable_int(struct xgbe_channel *channel,
1207 enum xgbe_int int_id)
1208{
1209 switch (int_id) {
1210 case XGMAC_INT_DMA_ISR_DC0IS:
1211 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
1212 break;
1213 case XGMAC_INT_DMA_CH_SR_TI:
1214 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
1215 break;
1216 case XGMAC_INT_DMA_CH_SR_TPS:
1217 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
1218 break;
1219 case XGMAC_INT_DMA_CH_SR_TBU:
1220 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
1221 break;
1222 case XGMAC_INT_DMA_CH_SR_RI:
1223 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
1224 break;
1225 case XGMAC_INT_DMA_CH_SR_RBU:
1226 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
1227 break;
1228 case XGMAC_INT_DMA_CH_SR_RPS:
1229 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
1230 break;
1231 case XGMAC_INT_DMA_CH_SR_FBE:
1232 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
1233 break;
1234 case XGMAC_INT_DMA_ALL:
1235 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
1236 break;
1237 default:
1238 return -1;
1239 }
1240
1241 return 0;
1242}
1243
1244static int xgbe_disable_int(struct xgbe_channel *channel,
1245 enum xgbe_int int_id)
1246{
1247 unsigned int dma_ch_ier;
1248
1249 switch (int_id) {
1250 case XGMAC_INT_DMA_ISR_DC0IS:
1251 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
1252 break;
1253 case XGMAC_INT_DMA_CH_SR_TI:
1254 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
1255 break;
1256 case XGMAC_INT_DMA_CH_SR_TPS:
1257 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
1258 break;
1259 case XGMAC_INT_DMA_CH_SR_TBU:
1260 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
1261 break;
1262 case XGMAC_INT_DMA_CH_SR_RI:
1263 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
1264 break;
1265 case XGMAC_INT_DMA_CH_SR_RBU:
1266 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
1267 break;
1268 case XGMAC_INT_DMA_CH_SR_RPS:
1269 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
1270 break;
1271 case XGMAC_INT_DMA_CH_SR_FBE:
1272 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
1273 break;
1274 case XGMAC_INT_DMA_ALL:
1275 xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
1276
1277 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1278 dma_ch_ier &= ~DMA_INTERRUPT_MASK;
1279 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1280 break;
1281 default:
1282 return -1;
1283 }
1284
1285 return 0;
1286}
1287
1288static int xgbe_exit(struct xgbe_prv_data *pdata)
1289{
1290 unsigned int count = 2000;
1291
1292 DBGPR("-->xgbe_exit\n");
1293
1294 /* Issue a software reset */
1295 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
1296 usleep_range(10, 15);
1297
1298 /* Poll until the software reset completes */
1299 while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
1300 usleep_range(500, 600);
1301
1302 if (!count)
1303 return -EBUSY;
1304
1305 DBGPR("<--xgbe_exit\n");
1306
1307 return 0;
1308}
1309
1310static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
1311{
1312 unsigned int i, count;
1313
1314 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1315 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
1316
1317 /* Poll until the flush of each Tx queue completes */
1318 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
1319 count = 2000;
1320 while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
1321 MTL_Q_TQOMR, FTQ))
1322 usleep_range(500, 600);
1323
1324 if (!count)
1325 return -EBUSY;
1326 }
1327
1328 return 0;
1329}
1330
1331static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
1332{
1333 /* Set enhanced addressing mode */
1334 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
1335
1336 /* Set the System Bus mode */
1337 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
1338}
1339
1340static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
1341{
1342 unsigned int arcache, awcache;
1343
1344 arcache = 0;
1345 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
1346 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
1347 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
1348 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
1349 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
1350 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
1351 XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
1352
1353 awcache = 0;
1354 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
1355 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
1356 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
1357 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
1358 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
1359 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
1360 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
1361 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
1362 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
1363}
1364
1365static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
1366{
1367 unsigned int i;
1368
1369 /* Set Tx to weighted round robin scheduling algorithm (when
1370 * traffic class is using ETS algorithm)
1371 */
1372 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
1373
1374 /* Set Tx traffic classes to strict priority algorithm */
1375 for (i = 0; i < XGBE_TC_CNT; i++)
1376 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
1377
1378 /* Set Rx to strict priority algorithm */
1379 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
1380}
1381
1382static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
1383 unsigned char queue_count)
1384{
1385 unsigned int q_fifo_size = 0;
1386 enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1387
1388 /* Calculate Tx/Rx fifo share per queue */
1389 switch (fifo_size) {
1390 case 0:
1391 q_fifo_size = FIFO_SIZE_B(128);
1392 break;
1393 case 1:
1394 q_fifo_size = FIFO_SIZE_B(256);
1395 break;
1396 case 2:
1397 q_fifo_size = FIFO_SIZE_B(512);
1398 break;
1399 case 3:
1400 q_fifo_size = FIFO_SIZE_KB(1);
1401 break;
1402 case 4:
1403 q_fifo_size = FIFO_SIZE_KB(2);
1404 break;
1405 case 5:
1406 q_fifo_size = FIFO_SIZE_KB(4);
1407 break;
1408 case 6:
1409 q_fifo_size = FIFO_SIZE_KB(8);
1410 break;
1411 case 7:
1412 q_fifo_size = FIFO_SIZE_KB(16);
1413 break;
1414 case 8:
1415 q_fifo_size = FIFO_SIZE_KB(32);
1416 break;
1417 case 9:
1418 q_fifo_size = FIFO_SIZE_KB(64);
1419 break;
1420 case 10:
1421 q_fifo_size = FIFO_SIZE_KB(128);
1422 break;
1423 case 11:
1424 q_fifo_size = FIFO_SIZE_KB(256);
1425 break;
1426 }
1427 q_fifo_size = q_fifo_size / queue_count;
1428
1429 /* Set the queue fifo size programmable value */
1430 if (q_fifo_size >= FIFO_SIZE_KB(256))
1431 p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
1432 else if (q_fifo_size >= FIFO_SIZE_KB(128))
1433 p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
1434 else if (q_fifo_size >= FIFO_SIZE_KB(64))
1435 p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
1436 else if (q_fifo_size >= FIFO_SIZE_KB(32))
1437 p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
1438 else if (q_fifo_size >= FIFO_SIZE_KB(16))
1439 p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
1440 else if (q_fifo_size >= FIFO_SIZE_KB(8))
1441 p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
1442 else if (q_fifo_size >= FIFO_SIZE_KB(4))
1443 p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
1444 else if (q_fifo_size >= FIFO_SIZE_KB(2))
1445 p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
1446 else if (q_fifo_size >= FIFO_SIZE_KB(1))
1447 p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
1448 else if (q_fifo_size >= FIFO_SIZE_B(512))
1449 p_fifo = XGMAC_MTL_FIFO_SIZE_512;
1450 else if (q_fifo_size >= FIFO_SIZE_B(256))
1451 p_fifo = XGMAC_MTL_FIFO_SIZE_256;
1452
1453 return p_fifo;
1454}
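
/* Editor's note -- an illustrative sketch, not part of the driver: the
 * per-queue fifo split worked by hand. A hardware fifo_size field of 7
 * selects a 16 KB fifo; divided across an assumed 4 queues that is 4 KB
 * each, and (assuming the enum encodes the size in 256-byte units minus
 * one) the programmed value is 15 -- matching the (fifo_size + 1) * 256
 * reported by the netdev_notice() calls below.
 */
#include <stdio.h>

int main(void)
{
	unsigned int q_fifo_size = 16 * 1024;	/* fifo_size field == 7 */
	unsigned int queue_count = 4;		/* assumed queue count */
	unsigned int per_queue = q_fifo_size / queue_count;
	unsigned int val = per_queue / 256 - 1;

	/* prints "per queue = 4096 bytes, register value = 15" */
	printf("per queue = %u bytes, register value = %u\n", per_queue, val);
	return 0;
}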
1455
1456static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
1457{
1458 enum xgbe_mtl_fifo_size fifo_size;
1459 unsigned int i;
1460
1461 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
1462 pdata->hw_feat.tx_q_cnt);
1463
1464 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1465 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
1466
1467 netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
1468 pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
1469}
1470
1471static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
1472{
1473 enum xgbe_mtl_fifo_size fifo_size;
1474 unsigned int i;
1475
1476 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
1477 pdata->hw_feat.rx_q_cnt);
1478
1479 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
1480 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
1481
1482 netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
1483 pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
1484}
1485
1486static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
1487{
1488 unsigned int i, reg, reg_val;
1489 unsigned int q_count = pdata->hw_feat.rx_q_cnt;
1490
1491 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
1492 reg = MTL_RQDCM0R;
1493 reg_val = 0;
1494 for (i = 0; i < q_count;) {
1495 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
1496
1497 if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
1498 continue;
1499
1500 XGMAC_IOWRITE(pdata, reg, reg_val);
1501
1502 reg += MTL_RQDCM_INC;
1503 reg_val = 0;
1504 }
1505}
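
/* Editor's note -- an illustrative sketch, not part of the driver: how the
 * loop above packs the per-queue dynamic-mapping flag (0x80) into 32-bit
 * MTL_RQDCMnR values, one byte per queue, assuming MTL_RQDCM_Q_PER_REG is
 * 4 and six Rx queues are present.
 */
#include <stdio.h>

int main(void)
{
	unsigned int q_per_reg = 4, q_count = 6;	/* assumed values */
	unsigned int i = 0, reg_val = 0;

	while (i < q_count) {
		reg_val |= 0x80 << ((i++ % q_per_reg) << 3);

		if ((i % q_per_reg) && (i != q_count))
			continue;

		/* prints 0x80808080, then 0x00008080 */
		printf("reg value = 0x%08x\n", reg_val);
		reg_val = 0;
	}
	return 0;
}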
1506
1507static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
1512 /* Activate flow control when less than 4k left in fifo */
1513 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
1514
1515 /* De-activate flow control when more than 6k left in fifo */
1516 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
1517 }
1518}
1519
1520static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
1521{
1522 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
1523}
1524
1525static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
1526{
1527 unsigned int val;
1528
1529 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
1530
1531 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1532}
1533
1534static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
1535{
1536 if (pdata->netdev->features & NETIF_F_RXCSUM)
1537 xgbe_enable_rx_csum(pdata);
1538 else
1539 xgbe_disable_rx_csum(pdata);
1540}
1541
1542static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
1543{
1544 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1545 xgbe_enable_rx_vlan_stripping(pdata);
1546 else
1547 xgbe_disable_rx_vlan_stripping(pdata);
1548}
1549
1550static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
1551{
1552 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1553 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
1554
1555 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
1556 stats->txoctetcount_gb +=
1557 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1558
1559 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
1560 stats->txframecount_gb +=
1561 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1562
1563 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
1564 stats->txbroadcastframes_g +=
1565 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1566
1567 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
1568 stats->txmulticastframes_g +=
1569 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1570
1571 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
1572 stats->tx64octets_gb +=
1573 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1574
1575 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
1576 stats->tx65to127octets_gb +=
1577 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1578
1579 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
1580 stats->tx128to255octets_gb +=
1581 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1582
1583 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
1584 stats->tx256to511octets_gb +=
1585 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1586
1587 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
1588 stats->tx512to1023octets_gb +=
1589 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1590
1591 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
1592 stats->tx1024tomaxoctets_gb +=
1593 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1594
1595 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
1596 stats->txunicastframes_gb +=
1597 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1598
1599 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
1600 stats->txmulticastframes_gb +=
1601 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1602
1603 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
1604 stats->txbroadcastframes_g +=
1605 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1606
1607 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
1608 stats->txunderflowerror +=
1609 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1610
1611 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
1612 stats->txoctetcount_g +=
1613 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1614
1615 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
1616 stats->txframecount_g +=
1617 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1618
1619 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
1620 stats->txpauseframes +=
1621 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1622
1623 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
1624 stats->txvlanframes_g +=
1625 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1626}
1627
1628static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
1629{
1630 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1631 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
1632
1633 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
1634 stats->rxframecount_gb +=
1635 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1636
1637 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
1638 stats->rxoctetcount_gb +=
1639 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1640
1641 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
1642 stats->rxoctetcount_g +=
1643 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1644
1645 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
1646 stats->rxbroadcastframes_g +=
1647 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1648
1649 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
1650 stats->rxmulticastframes_g +=
1651 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1652
1653 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
1654 stats->rxcrcerror +=
1655 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1656
1657 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
1658 stats->rxrunterror +=
1659 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1660
1661 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
1662 stats->rxjabbererror +=
1663 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1664
1665 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
1666 stats->rxundersize_g +=
1667 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1668
1669 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
1670 stats->rxoversize_g +=
1671 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1672
1673 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
1674 stats->rx64octets_gb +=
1675 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1676
1677 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
1678 stats->rx65to127octets_gb +=
1679 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1680
1681 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
1682 stats->rx128to255octets_gb +=
1683 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1684
1685 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
1686 stats->rx256to511octets_gb +=
1687 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1688
1689 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
1690 stats->rx512to1023octets_gb +=
1691 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1692
1693 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
1694 stats->rx1024tomaxoctets_gb +=
1695 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1696
1697 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
1698 stats->rxunicastframes_g +=
1699 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1700
1701 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
1702 stats->rxlengtherror +=
1703 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1704
1705 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
1706 stats->rxoutofrangetype +=
1707 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1708
1709 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
1710 stats->rxpauseframes +=
1711 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1712
1713 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
1714 stats->rxfifooverflow +=
1715 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1716
1717 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
1718 stats->rxvlanframes_gb +=
1719 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1720
1721 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
1722 stats->rxwatchdogerror +=
1723 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1724}
1725
1726static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
1727{
1728 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1729
1730 /* Freeze counters */
1731 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
1732
1733 stats->txoctetcount_gb +=
1734 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
1735
1736 stats->txframecount_gb +=
1737 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
1738
1739 stats->txbroadcastframes_g +=
1740 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1741
1742 stats->txmulticastframes_g +=
1743 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1744
1745 stats->tx64octets_gb +=
1746 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
1747
1748 stats->tx65to127octets_gb +=
1749 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
1750
1751 stats->tx128to255octets_gb +=
1752 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
1753
1754 stats->tx256to511octets_gb +=
1755 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
1756
1757 stats->tx512to1023octets_gb +=
1758 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1759
1760 stats->tx1024tomaxoctets_gb +=
1761 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1762
1763 stats->txunicastframes_gb +=
1764 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1765
1766 stats->txmulticastframes_gb +=
1767 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1768
1769 stats->txbroadcastframes_g +=
1770 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1771
1772 stats->txunderflowerror +=
1773 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
1774
1775 stats->txoctetcount_g +=
1776 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
1777
1778 stats->txframecount_g +=
1779 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
1780
1781 stats->txpauseframes +=
1782 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
1783
1784 stats->txvlanframes_g +=
1785 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
1786
1787 stats->rxframecount_gb +=
1788 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
1789
1790 stats->rxoctetcount_gb +=
1791 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
1792
1793 stats->rxoctetcount_g +=
1794 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
1795
1796 stats->rxbroadcastframes_g +=
1797 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
1798
1799 stats->rxmulticastframes_g +=
1800 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
1801
1802 stats->rxcrcerror +=
1803 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
1804
1805 stats->rxrunterror +=
1806 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
1807
1808 stats->rxjabbererror +=
1809 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
1810
1811 stats->rxundersize_g +=
1812 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
1813
1814 stats->rxoversize_g +=
1815 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
1816
1817 stats->rx64octets_gb +=
1818 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
1819
1820 stats->rx65to127octets_gb +=
1821 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
1822
1823 stats->rx128to255octets_gb +=
1824 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
1825
1826 stats->rx256to511octets_gb +=
1827 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
1828
1829 stats->rx512to1023octets_gb +=
1830 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
1831
1832 stats->rx1024tomaxoctets_gb +=
1833 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
1834
1835 stats->rxunicastframes_g +=
1836 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
1837
1838 stats->rxlengtherror +=
1839 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
1840
1841 stats->rxoutofrangetype +=
1842 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
1843
1844 stats->rxpauseframes +=
1845 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
1846
1847 stats->rxfifooverflow +=
1848 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
1849
1850 stats->rxvlanframes_gb +=
1851 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
1852
1853 stats->rxwatchdogerror +=
1854 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1855
1856 /* Un-freeze counters */
1857 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
1858}
1859
1860static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
1861{
1862 /* Set counters to reset on read */
1863 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
1864
1865 /* Reset the counters */
1866 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
1867}
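/*
 * Illustrative sketch, not part of the driver: with ROR set in MMC_CR,
 * each counter read returns only what accumulated since the previous read
 * and clears the hardware register, so the "+=" updates in
 * xgbe_tx_mmc_int(), xgbe_rx_mmc_int() and xgbe_read_mmc_stats() above
 * build wide software totals from narrow hardware counters. The helper
 * below is hypothetical and only demonstrates the accumulation pattern.
 */
static inline void example_mmc_accumulate(u64 *sw_total, u32 ror_read)
{
	/* ror_read is the delta a single XGMAC_IOREAD() returns under ROR */
	*sw_total += ror_read;
}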
1868
1869static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
1870{
1871 struct xgbe_channel *channel;
1872 unsigned int i;
1873
1874 /* Enable each Tx DMA channel */
1875 channel = pdata->channel;
1876 for (i = 0; i < pdata->channel_count; i++, channel++) {
1877 if (!channel->tx_ring)
1878 break;
1879
1880 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
1881 }
1882
1883 /* Enable each Tx queue */
1884 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1885 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
1886 MTL_Q_ENABLED);
1887
1888 /* Enable MAC Tx */
1889 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
1890}
1891
1892static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
1893{
1894 struct xgbe_channel *channel;
1895 unsigned int i;
1896
1897 /* Disable MAC Tx */
1898 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
1899
1900 /* Disable each Tx queue */
1901 for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
1902 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
1903
1904 /* Disable each Tx DMA channel */
1905 channel = pdata->channel;
1906 for (i = 0; i < pdata->channel_count; i++, channel++) {
1907 if (!channel->tx_ring)
1908 break;
1909
1910 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
1911 }
1912}
1913
1914static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
1915{
1916 struct xgbe_channel *channel;
1917 unsigned int reg_val, i;
1918
1919 /* Enable each Rx DMA channel */
1920 channel = pdata->channel;
1921 for (i = 0; i < pdata->channel_count; i++, channel++) {
1922 if (!channel->rx_ring)
1923 break;
1924
1925 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
1926 }
1927
1928 /* Enable each Rx queue */
1929 reg_val = 0;
1930 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
1931 reg_val |= (0x02 << (i << 1));
1932 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
1933
1934 /* Enable MAC Rx */
1935 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
1936 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
1937 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
1938 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
1939}
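/*
 * Worked example (illustrative) for the MAC_RQC0R value computed above:
 * each Rx queue occupies a 2-bit field and the driver writes 0x02 per
 * queue (assumed here to select DCB/generic routing for the queue).
 * For rx_q_cnt = 4 the loop yields
 *	(0x02 << 0) | (0x02 << 2) | (0x02 << 4) | (0x02 << 6) = 0xaa.
 */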
1940
1941static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
1942{
1943 struct xgbe_channel *channel;
1944 unsigned int i;
1945
1946 /* Disable MAC Rx */
1947 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
1948 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
1949 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
1950 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
1951
1952 /* Disable each Rx queue */
1953 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
1954
1955 /* Disable each Rx DMA channel */
1956 channel = pdata->channel;
1957 for (i = 0; i < pdata->channel_count; i++, channel++) {
1958 if (!channel->rx_ring)
1959 break;
1960
1961 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
1962 }
1963}
1964
1965static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
1966{
1967 struct xgbe_channel *channel;
1968 unsigned int i;
1969
1970 /* Enable each Tx DMA channel */
1971 channel = pdata->channel;
1972 for (i = 0; i < pdata->channel_count; i++, channel++) {
1973 if (!channel->tx_ring)
1974 break;
1975
1976 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
1977 }
1978
1979 /* Enable MAC Tx */
1980 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
1981}
1982
1983static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
1984{
1985 struct xgbe_channel *channel;
1986 unsigned int i;
1987
1988 /* Disable MAC Tx */
1989 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
1990
1991 /* Disable each Tx DMA channel */
1992 channel = pdata->channel;
1993 for (i = 0; i < pdata->channel_count; i++, channel++) {
1994 if (!channel->tx_ring)
1995 break;
1996
1997 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
1998 }
1999}
2000
2001static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
2002{
2003 struct xgbe_channel *channel;
2004 unsigned int i;
2005
2006 /* Enable each Rx DMA channel */
2007 channel = pdata->channel;
2008 for (i = 0; i < pdata->channel_count; i++, channel++) {
2009 if (!channel->rx_ring)
2010 break;
2011
2012 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
2013 }
2014}
2015
2016static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
2017{
2018 struct xgbe_channel *channel;
2019 unsigned int i;
2020
2021 /* Disable each Rx DMA channel */
2022 channel = pdata->channel;
2023 for (i = 0; i < pdata->channel_count; i++, channel++) {
2024 if (!channel->rx_ring)
2025 break;
2026
2027 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
2028 }
2029}
2030
2031static int xgbe_init(struct xgbe_prv_data *pdata)
2032{
2033 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2034 int ret;
2035
2036 DBGPR("-->xgbe_init\n");
2037
2038 /* Flush Tx queues */
2039 ret = xgbe_flush_tx_queues(pdata);
2040 if (ret)
2041 return ret;
2042
2043 /*
2044 * Initialize DMA related features
2045 */
2046 xgbe_config_dma_bus(pdata);
2047 xgbe_config_dma_cache(pdata);
2048 xgbe_config_osp_mode(pdata);
2049 xgbe_config_pblx8(pdata);
2050 xgbe_config_tx_pbl_val(pdata);
2051 xgbe_config_rx_pbl_val(pdata);
2052 xgbe_config_rx_coalesce(pdata);
2053 xgbe_config_tx_coalesce(pdata);
2054 xgbe_config_rx_buffer_size(pdata);
2055 xgbe_config_tso_mode(pdata);
2056 desc_if->wrapper_tx_desc_init(pdata);
2057 desc_if->wrapper_rx_desc_init(pdata);
2058 xgbe_enable_dma_interrupts(pdata);
2059
2060 /*
2061 * Initialize MTL related features
2062 */
2063 xgbe_config_mtl_mode(pdata);
2064 xgbe_config_rx_queue_mapping(pdata);
2065	/* TODO: Program the priorities mapped to the Selected Traffic Classes
2066	 *	 in MTL_TC_Prty_Map0-3 registers */
2067 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
2068 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
2069 xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
2070 xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
2071 xgbe_config_tx_fifo_size(pdata);
2072 xgbe_config_rx_fifo_size(pdata);
2073 xgbe_config_flow_control_threshold(pdata);
2074	/* TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
2075	/* TODO: Error packet and undersized good packet forwarding enable
2076	 *	 (FEP and FUP)
2077	 */
2078 xgbe_enable_mtl_interrupts(pdata);
2079
2080 /* Transmit Class Weight */
2081 XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
2082
2083 /*
2084 * Initialize MAC related features
2085 */
2086 xgbe_config_mac_address(pdata);
2087 xgbe_config_jumbo_enable(pdata);
2088 xgbe_config_flow_control(pdata);
2089 xgbe_config_checksum_offload(pdata);
2090 xgbe_config_vlan_support(pdata);
2091 xgbe_config_mmc(pdata);
2092 xgbe_enable_mac_interrupts(pdata);
2093
2094 DBGPR("<--xgbe_init\n");
2095
2096 return 0;
2097}
2098
2099void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
2100{
2101	DBGPR("-->xgbe_init_function_ptrs_dev\n");
2102
2103 hw_if->tx_complete = xgbe_tx_complete;
2104
2105 hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
2106 hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
2107 hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
2108 hw_if->set_mac_address = xgbe_set_mac_address;
2109
2110 hw_if->enable_rx_csum = xgbe_enable_rx_csum;
2111 hw_if->disable_rx_csum = xgbe_disable_rx_csum;
2112
2113 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
2114 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
2115
2116 hw_if->read_mmd_regs = xgbe_read_mmd_regs;
2117 hw_if->write_mmd_regs = xgbe_write_mmd_regs;
2118
2119 hw_if->set_gmii_speed = xgbe_set_gmii_speed;
2120 hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
2121 hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
2122
2123 hw_if->enable_tx = xgbe_enable_tx;
2124 hw_if->disable_tx = xgbe_disable_tx;
2125 hw_if->enable_rx = xgbe_enable_rx;
2126 hw_if->disable_rx = xgbe_disable_rx;
2127
2128 hw_if->powerup_tx = xgbe_powerup_tx;
2129 hw_if->powerdown_tx = xgbe_powerdown_tx;
2130 hw_if->powerup_rx = xgbe_powerup_rx;
2131 hw_if->powerdown_rx = xgbe_powerdown_rx;
2132
2133 hw_if->pre_xmit = xgbe_pre_xmit;
2134 hw_if->dev_read = xgbe_dev_read;
2135 hw_if->enable_int = xgbe_enable_int;
2136 hw_if->disable_int = xgbe_disable_int;
2137 hw_if->init = xgbe_init;
2138 hw_if->exit = xgbe_exit;
2139
2140	/* Descriptor related sequences have to be initialized here */
2141 hw_if->tx_desc_init = xgbe_tx_desc_init;
2142 hw_if->rx_desc_init = xgbe_rx_desc_init;
2143 hw_if->tx_desc_reset = xgbe_tx_desc_reset;
2144 hw_if->rx_desc_reset = xgbe_rx_desc_reset;
2145 hw_if->is_last_desc = xgbe_is_last_desc;
2146 hw_if->is_context_desc = xgbe_is_context_desc;
2147
2148	/* For flow control */
2149 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
2150 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
2151
2152	/* For Rx and Tx coalescing */
2153 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
2154 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
2155 hw_if->usec_to_riwt = xgbe_usec_to_riwt;
2156 hw_if->riwt_to_usec = xgbe_riwt_to_usec;
2157
2158 /* For RX and TX threshold config */
2159 hw_if->config_rx_threshold = xgbe_config_rx_threshold;
2160 hw_if->config_tx_threshold = xgbe_config_tx_threshold;
2161
2162 /* For RX and TX Store and Forward Mode config */
2163 hw_if->config_rsf_mode = xgbe_config_rsf_mode;
2164 hw_if->config_tsf_mode = xgbe_config_tsf_mode;
2165
2166 /* For TX DMA Operating on Second Frame config */
2167 hw_if->config_osp_mode = xgbe_config_osp_mode;
2168
2169 /* For RX and TX PBL config */
2170 hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
2171 hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
2172 hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
2173 hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
2174 hw_if->config_pblx8 = xgbe_config_pblx8;
2175
2176 /* For MMC statistics support */
2177 hw_if->tx_mmc_int = xgbe_tx_mmc_int;
2178 hw_if->rx_mmc_int = xgbe_rx_mmc_int;
2179 hw_if->read_mmc_stats = xgbe_read_mmc_stats;
2180
2181	DBGPR("<--xgbe_init_function_ptrs_dev\n");
2182}
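/*
 * Usage sketch, not part of the file: probe code is expected to populate
 * the ops table with this initializer and then drive the hardware only
 * through it, e.g.
 *
 *	xgbe_init_function_ptrs_dev(&pdata->hw_if);
 *	ret = pdata->hw_if.init(pdata);	  /* dispatches to xgbe_init() */
 *
 * keeping all register access behind a single indirection layer.
 */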
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 000000000000..cfe3d93b5f52
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,1351 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/spinlock.h>
118#include <linux/tcp.h>
119#include <linux/if_vlan.h>
120#include <linux/phy.h>
121#include <net/busy_poll.h>
122#include <linux/clk.h>
123#include <linux/if_ether.h>
124
125#include "xgbe.h"
126#include "xgbe-common.h"
127
128
129static int xgbe_poll(struct napi_struct *, int);
130static void xgbe_set_rx_mode(struct net_device *);
131
132static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
133{
134 return (ring->rdesc_count - (ring->cur - ring->dirty));
135}
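/*
 * Illustrative note: ring->cur and ring->dirty are free-running unsigned
 * indices, so (cur - dirty) is the number of in-flight descriptors even
 * across wraparound. With 32-bit indices, cur = 0x00000002 and
 * dirty = 0xfffffffe give cur - dirty = 4 by modular arithmetic, hence
 * avail = rdesc_count - 4 as expected.
 */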
136
137static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
138{
139 unsigned int rx_buf_size;
140
141 if (mtu > XGMAC_JUMBO_PACKET_MTU) {
142 netdev_alert(netdev, "MTU exceeds maximum supported value\n");
143 return -EINVAL;
144 }
145
146 rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
147 if (rx_buf_size < RX_MIN_BUF_SIZE)
148 rx_buf_size = RX_MIN_BUF_SIZE;
149 rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
150
151 return rx_buf_size;
152}
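/*
 * Worked example (illustrative): for mtu = 1500 the raw size is
 * 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4) = 1522; it is then
 * raised to RX_MIN_BUF_SIZE if smaller and rounded up to the next
 * RX_BUF_ALIGN boundary by the (x + a - 1) & ~(a - 1) idiom. Assuming a
 * hypothetical RX_BUF_ALIGN of 64, 1522 rounds up to 1536.
 */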
153
154static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
155{
156 struct xgbe_hw_if *hw_if = &pdata->hw_if;
157 struct xgbe_channel *channel;
158 unsigned int i;
159
160 channel = pdata->channel;
161 for (i = 0; i < pdata->channel_count; i++, channel++) {
162 if (channel->tx_ring)
163 hw_if->enable_int(channel,
164 XGMAC_INT_DMA_CH_SR_TI);
165 if (channel->rx_ring)
166 hw_if->enable_int(channel,
167 XGMAC_INT_DMA_CH_SR_RI);
168 }
169}
170
171static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
172{
173 struct xgbe_hw_if *hw_if = &pdata->hw_if;
174 struct xgbe_channel *channel;
175 unsigned int i;
176
177 channel = pdata->channel;
178 for (i = 0; i < pdata->channel_count; i++, channel++) {
179 if (channel->tx_ring)
180 hw_if->disable_int(channel,
181 XGMAC_INT_DMA_CH_SR_TI);
182 if (channel->rx_ring)
183 hw_if->disable_int(channel,
184 XGMAC_INT_DMA_CH_SR_RI);
185 }
186}
187
188static irqreturn_t xgbe_isr(int irq, void *data)
189{
190 struct xgbe_prv_data *pdata = data;
191 struct xgbe_hw_if *hw_if = &pdata->hw_if;
192 struct xgbe_channel *channel;
193 unsigned int dma_isr, dma_ch_isr;
194 unsigned int mac_isr;
195 unsigned int i;
196
197	/* The DMA interrupt status register also reports MAC and MTL
198	 * interrupts, so checking that this register is non-zero is
199	 * sufficient before doing any per-channel work
200	 */
201 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
202 if (!dma_isr)
203 goto isr_done;
204
205 DBGPR("-->xgbe_isr\n");
206
207 DBGPR(" DMA_ISR = %08x\n", dma_isr);
208 DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
209 DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
210
211 for (i = 0; i < pdata->channel_count; i++) {
212 if (!(dma_isr & (1 << i)))
213 continue;
214
215 channel = pdata->channel + i;
216
217 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
218 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
219
220 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
221 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
222 if (napi_schedule_prep(&pdata->napi)) {
223 /* Disable Tx and Rx interrupts */
224 xgbe_disable_rx_tx_ints(pdata);
225
226 /* Turn on polling */
227 __napi_schedule(&pdata->napi);
228 }
229 }
230
231 /* Restart the device on a Fatal Bus Error */
232 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
233 schedule_work(&pdata->restart_work);
234
235 /* Clear all interrupt signals */
236 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
237 }
238
239 if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
240 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
241
242 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
243 hw_if->tx_mmc_int(pdata);
244
245 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
246 hw_if->rx_mmc_int(pdata);
247 }
248
249 DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
250
251 DBGPR("<--xgbe_isr\n");
252
253isr_done:
254 return IRQ_HANDLED;
255}
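/*
 * Illustrative sketch of the interrupt/NAPI handshake used above:
 *
 *	if (napi_schedule_prep(&pdata->napi)) {	  /* claim NAPI ownership */
 *		xgbe_disable_rx_tx_ints(pdata);	  /* quiesce further IRQs */
 *		__napi_schedule(&pdata->napi);	  /* xgbe_poll() in softirq */
 *	}
 *
 * xgbe_poll() re-enables the Tx/Rx interrupts only after napi_complete(),
 * i.e. once it has consumed less than its budget.
 */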
256
257static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
258{
259 struct xgbe_channel *channel = container_of(timer,
260 struct xgbe_channel,
261 tx_timer);
262 struct xgbe_ring *ring = channel->tx_ring;
263 struct xgbe_prv_data *pdata = channel->pdata;
264 unsigned long flags;
265
266 DBGPR("-->xgbe_tx_timer\n");
267
268 spin_lock_irqsave(&ring->lock, flags);
269
270 if (napi_schedule_prep(&pdata->napi)) {
271 /* Disable Tx and Rx interrupts */
272 xgbe_disable_rx_tx_ints(pdata);
273
274 /* Turn on polling */
275 __napi_schedule(&pdata->napi);
276 }
277
278 channel->tx_timer_active = 0;
279
280 spin_unlock_irqrestore(&ring->lock, flags);
281
282 DBGPR("<--xgbe_tx_timer\n");
283
284 return HRTIMER_NORESTART;
285}
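/*
 * Illustrative note: the Tx timer does not complete descriptors itself;
 * like the hard interrupt above, it only kicks NAPI, so Tx completion is
 * always performed by xgbe_tx_poll() from softirq context.
 */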
286
287static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
288{
289 struct xgbe_channel *channel;
290 unsigned int i;
291
292 DBGPR("-->xgbe_init_tx_timers\n");
293
294 channel = pdata->channel;
295 for (i = 0; i < pdata->channel_count; i++, channel++) {
296 if (!channel->tx_ring)
297 break;
298
299 DBGPR(" %s adding tx timer\n", channel->name);
300 hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
301 HRTIMER_MODE_REL);
302 channel->tx_timer.function = xgbe_tx_timer;
303 }
304
305 DBGPR("<--xgbe_init_tx_timers\n");
306}
307
308static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
309{
310 struct xgbe_channel *channel;
311 unsigned int i;
312
313 DBGPR("-->xgbe_stop_tx_timers\n");
314
315 channel = pdata->channel;
316 for (i = 0; i < pdata->channel_count; i++, channel++) {
317 if (!channel->tx_ring)
318 break;
319
320 DBGPR(" %s deleting tx timer\n", channel->name);
321 channel->tx_timer_active = 0;
322 hrtimer_cancel(&channel->tx_timer);
323 }
324
325 DBGPR("<--xgbe_stop_tx_timers\n");
326}
327
328void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
329{
330 unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
331 struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
332
333 DBGPR("-->xgbe_get_all_hw_features\n");
334
335 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
336 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
337 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
338
339 memset(hw_feat, 0, sizeof(*hw_feat));
340
341 /* Hardware feature register 0 */
342 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
343 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
344 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
345 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
346 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
347 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
348 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
349 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
350 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
351 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
352 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
353 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
354 ADDMACADRSEL);
355 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
356 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
357
358 /* Hardware feature register 1 */
359 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
360 RXFIFOSIZE);
361 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
362 TXFIFOSIZE);
363 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
364 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
365 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
366 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
367 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
368 HASHTBLSZ);
369 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
370 L3L4FNUM);
371
372 /* Hardware feature register 2 */
373 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
374 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
375 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
376 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
377 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
378 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
379
380	/* The Queue and Channel counts are zero-based, so increment them
381	 * to get the actual number
382	 */
383 hw_feat->rx_q_cnt++;
384 hw_feat->tx_q_cnt++;
385 hw_feat->rx_ch_cnt++;
386 hw_feat->tx_ch_cnt++;
387
388 DBGPR("<--xgbe_get_all_hw_features\n");
389}
390
391static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
392{
393 if (add)
394 netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
395 NAPI_POLL_WEIGHT);
396 napi_enable(&pdata->napi);
397}
398
399static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
400{
401 napi_disable(&pdata->napi);
402}
403
404void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
405{
406 struct xgbe_hw_if *hw_if = &pdata->hw_if;
407
408 DBGPR("-->xgbe_init_tx_coalesce\n");
409
410 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
411 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
412
413 hw_if->config_tx_coalesce(pdata);
414
415 DBGPR("<--xgbe_init_tx_coalesce\n");
416}
417
418void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
419{
420 struct xgbe_hw_if *hw_if = &pdata->hw_if;
421
422 DBGPR("-->xgbe_init_rx_coalesce\n");
423
424 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
425 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
426
427 hw_if->config_rx_coalesce(pdata);
428
429 DBGPR("<--xgbe_init_rx_coalesce\n");
430}
431
432static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
433{
434 struct xgbe_desc_if *desc_if = &pdata->desc_if;
435 struct xgbe_channel *channel;
436 struct xgbe_ring *ring;
437 struct xgbe_ring_data *rdata;
438 unsigned int i, j;
439
440 DBGPR("-->xgbe_free_tx_skbuff\n");
441
442 channel = pdata->channel;
443 for (i = 0; i < pdata->channel_count; i++, channel++) {
444 ring = channel->tx_ring;
445 if (!ring)
446 break;
447
448 for (j = 0; j < ring->rdesc_count; j++) {
449 rdata = GET_DESC_DATA(ring, j);
450 desc_if->unmap_skb(pdata, rdata);
451 }
452 }
453
454 DBGPR("<--xgbe_free_tx_skbuff\n");
455}
456
457static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
458{
459 struct xgbe_desc_if *desc_if = &pdata->desc_if;
460 struct xgbe_channel *channel;
461 struct xgbe_ring *ring;
462 struct xgbe_ring_data *rdata;
463 unsigned int i, j;
464
465 DBGPR("-->xgbe_free_rx_skbuff\n");
466
467 channel = pdata->channel;
468 for (i = 0; i < pdata->channel_count; i++, channel++) {
469 ring = channel->rx_ring;
470 if (!ring)
471 break;
472
473 for (j = 0; j < ring->rdesc_count; j++) {
474 rdata = GET_DESC_DATA(ring, j);
475 desc_if->unmap_skb(pdata, rdata);
476 }
477 }
478
479 DBGPR("<--xgbe_free_rx_skbuff\n");
480}
481
482int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
483{
484 struct xgbe_prv_data *pdata = netdev_priv(netdev);
485 struct xgbe_hw_if *hw_if = &pdata->hw_if;
486 unsigned long flags;
487
488 DBGPR("-->xgbe_powerdown\n");
489
490 if (!netif_running(netdev) ||
491 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
492 netdev_alert(netdev, "Device is already powered down\n");
493 DBGPR("<--xgbe_powerdown\n");
494 return -EINVAL;
495 }
496
497 phy_stop(pdata->phydev);
498
499 spin_lock_irqsave(&pdata->lock, flags);
500
501 if (caller == XGMAC_DRIVER_CONTEXT)
502 netif_device_detach(netdev);
503
504 netif_tx_stop_all_queues(netdev);
505 xgbe_napi_disable(pdata);
506
507 /* Powerdown Tx/Rx */
508 hw_if->powerdown_tx(pdata);
509 hw_if->powerdown_rx(pdata);
510
511 pdata->power_down = 1;
512
513 spin_unlock_irqrestore(&pdata->lock, flags);
514
515 DBGPR("<--xgbe_powerdown\n");
516
517 return 0;
518}
519
520int xgbe_powerup(struct net_device *netdev, unsigned int caller)
521{
522 struct xgbe_prv_data *pdata = netdev_priv(netdev);
523 struct xgbe_hw_if *hw_if = &pdata->hw_if;
524 unsigned long flags;
525
526 DBGPR("-->xgbe_powerup\n");
527
528 if (!netif_running(netdev) ||
529 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
530 netdev_alert(netdev, "Device is already powered up\n");
531 DBGPR("<--xgbe_powerup\n");
532 return -EINVAL;
533 }
534
535 spin_lock_irqsave(&pdata->lock, flags);
536
537 pdata->power_down = 0;
538
539 phy_start(pdata->phydev);
540
541 /* Enable Tx/Rx */
542 hw_if->powerup_tx(pdata);
543 hw_if->powerup_rx(pdata);
544
545 if (caller == XGMAC_DRIVER_CONTEXT)
546 netif_device_attach(netdev);
547
548 xgbe_napi_enable(pdata, 0);
549 netif_tx_start_all_queues(netdev);
550
551 spin_unlock_irqrestore(&pdata->lock, flags);
552
553 DBGPR("<--xgbe_powerup\n");
554
555 return 0;
556}
557
558static int xgbe_start(struct xgbe_prv_data *pdata)
559{
560 struct xgbe_hw_if *hw_if = &pdata->hw_if;
561 struct net_device *netdev = pdata->netdev;
562
563 DBGPR("-->xgbe_start\n");
564
565 xgbe_set_rx_mode(netdev);
566
567 hw_if->init(pdata);
568
569 phy_start(pdata->phydev);
570
571 hw_if->enable_tx(pdata);
572 hw_if->enable_rx(pdata);
573
574 xgbe_init_tx_timers(pdata);
575
576 xgbe_napi_enable(pdata, 1);
577 netif_tx_start_all_queues(netdev);
578
579 DBGPR("<--xgbe_start\n");
580
581 return 0;
582}
583
584static void xgbe_stop(struct xgbe_prv_data *pdata)
585{
586 struct xgbe_hw_if *hw_if = &pdata->hw_if;
587 struct net_device *netdev = pdata->netdev;
588
589 DBGPR("-->xgbe_stop\n");
590
591 phy_stop(pdata->phydev);
592
593 netif_tx_stop_all_queues(netdev);
594 xgbe_napi_disable(pdata);
595
596 xgbe_stop_tx_timers(pdata);
597
598 hw_if->disable_tx(pdata);
599 hw_if->disable_rx(pdata);
600
601 DBGPR("<--xgbe_stop\n");
602}
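/*
 * Illustrative note: xgbe_stop() tears down in roughly the reverse order
 * of xgbe_start(), quiescing the PHY and the stack-facing queues before
 * the Tx timers and the hardware Tx/Rx paths, so no new work is queued
 * against hardware that is being disabled.
 */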
603
604static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
605{
606 struct xgbe_hw_if *hw_if = &pdata->hw_if;
607
608 DBGPR("-->xgbe_restart_dev\n");
609
610 /* If not running, "restart" will happen on open */
611 if (!netif_running(pdata->netdev))
612 return;
613
614 xgbe_stop(pdata);
615 synchronize_irq(pdata->irq_number);
616
617 xgbe_free_tx_skbuff(pdata);
618 xgbe_free_rx_skbuff(pdata);
619
620 /* Issue software reset to device if requested */
621 if (reset)
622 hw_if->exit(pdata);
623
624 xgbe_start(pdata);
625
626 DBGPR("<--xgbe_restart_dev\n");
627}
628
629static void xgbe_restart(struct work_struct *work)
630{
631 struct xgbe_prv_data *pdata = container_of(work,
632 struct xgbe_prv_data,
633 restart_work);
634
635 rtnl_lock();
636
637 xgbe_restart_dev(pdata, 1);
638
639 rtnl_unlock();
640}
641
642static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
643{
644 if (vlan_tx_tag_present(skb))
645 packet->vlan_ctag = vlan_tx_tag_get(skb);
646}
647
648static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
649{
650 int ret;
651
652 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
653 TSO_ENABLE))
654 return 0;
655
656 ret = skb_cow_head(skb, 0);
657 if (ret)
658 return ret;
659
660 packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
661 packet->tcp_header_len = tcp_hdrlen(skb);
662 packet->tcp_payload_len = skb->len - packet->header_len;
663 packet->mss = skb_shinfo(skb)->gso_size;
664 DBGPR(" packet->header_len=%u\n", packet->header_len);
665 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
666 packet->tcp_header_len, packet->tcp_payload_len);
667 DBGPR(" packet->mss=%u\n", packet->mss);
668
669 return 0;
670}
671
672static int xgbe_is_tso(struct sk_buff *skb)
673{
674 if (skb->ip_summed != CHECKSUM_PARTIAL)
675 return 0;
676
677 if (!skb_is_gso(skb))
678 return 0;
679
680 DBGPR(" TSO packet to be processed\n");
681
682 return 1;
683}
684
685static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
686 struct xgbe_packet_data *packet)
687{
688 struct skb_frag_struct *frag;
689 unsigned int context_desc;
690 unsigned int len;
691 unsigned int i;
692
693 context_desc = 0;
694 packet->rdesc_count = 0;
695
696 if (xgbe_is_tso(skb)) {
697		/* TSO requires an extra descriptor if mss is different */
698 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
699 context_desc = 1;
700 packet->rdesc_count++;
701 }
702
703		/* TSO requires an extra descriptor for the TSO header */
704 packet->rdesc_count++;
705
706 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
707 TSO_ENABLE, 1);
708 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
709 CSUM_ENABLE, 1);
710 } else if (skb->ip_summed == CHECKSUM_PARTIAL)
711 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
712 CSUM_ENABLE, 1);
713
714 if (vlan_tx_tag_present(skb)) {
715 /* VLAN requires an extra descriptor if tag is different */
716 if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
717 /* We can share with the TSO context descriptor */
718 if (!context_desc) {
719 context_desc = 1;
720 packet->rdesc_count++;
721 }
722
723 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
724 VLAN_CTAG, 1);
725 }
726
727 for (len = skb_headlen(skb); len;) {
728 packet->rdesc_count++;
729 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
730 }
731
732 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
733 frag = &skb_shinfo(skb)->frags[i];
734 for (len = skb_frag_size(frag); len; ) {
735 packet->rdesc_count++;
736 len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
737 }
738 }
739}
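/*
 * Worked example (illustrative) for the rdesc_count computed above: a TSO
 * skb whose MSS differs from ring->tx.cur_mss, with a 200-byte linear
 * area and two 16 KiB fragments, and a hypothetical TX_MAX_BUF_SIZE of
 * 16352, needs 1 (context) + 1 (TSO header) + 1 (linear area)
 * + 2 + 2 (fragments, 16384 = 16352 + 32 each) = 7 descriptors.
 */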
740
741static int xgbe_open(struct net_device *netdev)
742{
743 struct xgbe_prv_data *pdata = netdev_priv(netdev);
744 struct xgbe_hw_if *hw_if = &pdata->hw_if;
745 struct xgbe_desc_if *desc_if = &pdata->desc_if;
746 int ret;
747
748 DBGPR("-->xgbe_open\n");
749
750 /* Enable the clock */
751 ret = clk_prepare_enable(pdata->sysclock);
752 if (ret) {
753 netdev_alert(netdev, "clk_prepare_enable failed\n");
754 return ret;
755 }
756
757 /* Calculate the Rx buffer size before allocating rings */
758 ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
759 if (ret < 0)
760 goto err_clk;
761 pdata->rx_buf_size = ret;
762
763 /* Allocate the ring descriptors and buffers */
764 ret = desc_if->alloc_ring_resources(pdata);
765 if (ret)
766 goto err_clk;
767
768 /* Initialize the device restart work struct */
769 INIT_WORK(&pdata->restart_work, xgbe_restart);
770
771 /* Request interrupts */
772 ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
773 netdev->name, pdata);
774 if (ret) {
775 netdev_alert(netdev, "error requesting irq %d\n",
776 pdata->irq_number);
777 goto err_irq;
778 }
779 pdata->irq_number = netdev->irq;
780
781 ret = xgbe_start(pdata);
782 if (ret)
783 goto err_start;
784
785 DBGPR("<--xgbe_open\n");
786
787 return 0;
788
789err_start:
790 hw_if->exit(pdata);
791
792 devm_free_irq(pdata->dev, pdata->irq_number, pdata);
793 pdata->irq_number = 0;
794
795err_irq:
796 desc_if->free_ring_resources(pdata);
797
798err_clk:
799 clk_disable_unprepare(pdata->sysclock);
800
801 return ret;
802}
803
804static int xgbe_close(struct net_device *netdev)
805{
806 struct xgbe_prv_data *pdata = netdev_priv(netdev);
807 struct xgbe_hw_if *hw_if = &pdata->hw_if;
808 struct xgbe_desc_if *desc_if = &pdata->desc_if;
809
810 DBGPR("-->xgbe_close\n");
811
812 /* Stop the device */
813 xgbe_stop(pdata);
814
815 /* Issue software reset to device */
816 hw_if->exit(pdata);
817
818 /* Free all the ring data */
819 desc_if->free_ring_resources(pdata);
820
821 /* Release the interrupt */
822 if (pdata->irq_number != 0) {
823 devm_free_irq(pdata->dev, pdata->irq_number, pdata);
824 pdata->irq_number = 0;
825 }
826
827 /* Disable the clock */
828 clk_disable_unprepare(pdata->sysclock);
829
830 DBGPR("<--xgbe_close\n");
831
832 return 0;
833}
834
835static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
836{
837 struct xgbe_prv_data *pdata = netdev_priv(netdev);
838 struct xgbe_hw_if *hw_if = &pdata->hw_if;
839 struct xgbe_desc_if *desc_if = &pdata->desc_if;
840 struct xgbe_channel *channel;
841 struct xgbe_ring *ring;
842 struct xgbe_packet_data *packet;
843 unsigned long flags;
844 int ret;
845
846 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
847
848 channel = pdata->channel + skb->queue_mapping;
849 ring = channel->tx_ring;
850 packet = &ring->packet_data;
851
852 ret = NETDEV_TX_OK;
853
854 spin_lock_irqsave(&ring->lock, flags);
855
856 if (skb->len == 0) {
857 netdev_err(netdev, "empty skb received from stack\n");
858 dev_kfree_skb_any(skb);
859 goto tx_netdev_return;
860 }
861
862 /* Calculate preliminary packet info */
863 memset(packet, 0, sizeof(*packet));
864 xgbe_packet_info(ring, skb, packet);
865
866 /* Check that there are enough descriptors available */
867 if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
868 DBGPR(" Tx queue stopped, not enough descriptors available\n");
869 netif_stop_subqueue(netdev, channel->queue_index);
870 ring->tx.queue_stopped = 1;
871 ret = NETDEV_TX_BUSY;
872 goto tx_netdev_return;
873 }
874
875 ret = xgbe_prep_tso(skb, packet);
876 if (ret) {
877 netdev_err(netdev, "error processing TSO packet\n");
878 dev_kfree_skb_any(skb);
879 goto tx_netdev_return;
880 }
881 xgbe_prep_vlan(skb, packet);
882
883 if (!desc_if->map_tx_skb(channel, skb)) {
884 dev_kfree_skb_any(skb);
885 goto tx_netdev_return;
886 }
887
888 /* Configure required descriptor fields for transmission */
889 hw_if->pre_xmit(channel);
890
891#ifdef XGMAC_ENABLE_TX_PKT_DUMP
892 xgbe_print_pkt(netdev, skb, true);
893#endif
894
895tx_netdev_return:
896 spin_unlock_irqrestore(&ring->lock, flags);
897
898 DBGPR("<--xgbe_xmit\n");
899
900 return ret;
901}
902
903static void xgbe_set_rx_mode(struct net_device *netdev)
904{
905 struct xgbe_prv_data *pdata = netdev_priv(netdev);
906 struct xgbe_hw_if *hw_if = &pdata->hw_if;
907 unsigned int pr_mode, am_mode;
908
909 DBGPR("-->xgbe_set_rx_mode\n");
910
911 pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
912 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
913
914 if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
915 pr_mode = 1;
916 if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
917 am_mode = 1;
918 if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
919 pdata->hw_feat.addn_mac)
920 pr_mode = 1;
921
922 hw_if->set_promiscuous_mode(pdata, pr_mode);
923 hw_if->set_all_multicast_mode(pdata, am_mode);
924 if (!pr_mode)
925 hw_if->set_addn_mac_addrs(pdata, am_mode);
926
927 DBGPR("<--xgbe_set_rx_mode\n");
928}
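/*
 * Summary of the fallback logic above (illustrative): promiscuous mode is
 * forced when the unicast list alone, or the unicast and multicast lists
 * combined, exceed the addn_mac hardware filter slots; all-multi is
 * forced when the multicast list alone exceeds them. The individual
 * filter registers are programmed only while promiscuous mode is off.
 */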
929
930static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
931{
932 struct xgbe_prv_data *pdata = netdev_priv(netdev);
933 struct xgbe_hw_if *hw_if = &pdata->hw_if;
934 struct sockaddr *saddr = addr;
935
936 DBGPR("-->xgbe_set_mac_address\n");
937
938 if (!is_valid_ether_addr(saddr->sa_data))
939 return -EADDRNOTAVAIL;
940
941 memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
942
943 hw_if->set_mac_address(pdata, netdev->dev_addr);
944
945 DBGPR("<--xgbe_set_mac_address\n");
946
947 return 0;
948}
949
950static int xgbe_change_mtu(struct net_device *netdev, int mtu)
951{
952 struct xgbe_prv_data *pdata = netdev_priv(netdev);
953 int ret;
954
955 DBGPR("-->xgbe_change_mtu\n");
956
957 ret = xgbe_calc_rx_buf_size(netdev, mtu);
958 if (ret < 0)
959 return ret;
960
961 pdata->rx_buf_size = ret;
962 netdev->mtu = mtu;
963
964 xgbe_restart_dev(pdata, 0);
965
966 DBGPR("<--xgbe_change_mtu\n");
967
968 return 0;
969}
970
971static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
972 struct rtnl_link_stats64 *s)
973{
974 struct xgbe_prv_data *pdata = netdev_priv(netdev);
975 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
976
977 DBGPR("-->%s\n", __func__);
978
979 pdata->hw_if.read_mmc_stats(pdata);
980
981 s->rx_packets = pstats->rxframecount_gb;
982 s->rx_bytes = pstats->rxoctetcount_gb;
983 s->rx_errors = pstats->rxframecount_gb -
984 pstats->rxbroadcastframes_g -
985 pstats->rxmulticastframes_g -
986 pstats->rxunicastframes_g;
987 s->multicast = pstats->rxmulticastframes_g;
988 s->rx_length_errors = pstats->rxlengtherror;
989 s->rx_crc_errors = pstats->rxcrcerror;
990 s->rx_fifo_errors = pstats->rxfifooverflow;
991
992 s->tx_packets = pstats->txframecount_gb;
993 s->tx_bytes = pstats->txoctetcount_gb;
994 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
995 s->tx_dropped = netdev->stats.tx_dropped;
996
997 DBGPR("<--%s\n", __func__);
998
999 return s;
1000}
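/*
 * Note on the rx_errors derivation above (illustrative): rxframecount_gb
 * counts good and bad frames, while the unicast/multicast/broadcast
 * counters cover good frames only, so the difference is the bad-frame
 * count. E.g. 1000 total frames of which 700 uc + 250 mc + 40 bc = 990
 * were good gives rx_errors = 10.
 */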
1001
1002#ifdef CONFIG_NET_POLL_CONTROLLER
1003static void xgbe_poll_controller(struct net_device *netdev)
1004{
1005 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1006
1007 DBGPR("-->xgbe_poll_controller\n");
1008
1009 disable_irq(pdata->irq_number);
1010
1011 xgbe_isr(pdata->irq_number, pdata);
1012
1013 enable_irq(pdata->irq_number);
1014
1015 DBGPR("<--xgbe_poll_controller\n");
1016}
1017#endif /* End CONFIG_NET_POLL_CONTROLLER */
1018
1019static int xgbe_set_features(struct net_device *netdev,
1020 netdev_features_t features)
1021{
1022 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1023 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1024 unsigned int rxcsum_enabled, rxvlan_enabled;
1025
1026 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
1027 rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
1028
1029 if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
1030 hw_if->enable_rx_csum(pdata);
1031 netdev_alert(netdev, "state change - rxcsum enabled\n");
1032 } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
1033 hw_if->disable_rx_csum(pdata);
1034 netdev_alert(netdev, "state change - rxcsum disabled\n");
1035 }
1036
1037 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
1038 hw_if->enable_rx_vlan_stripping(pdata);
1039 netdev_alert(netdev, "state change - rxvlan enabled\n");
1040 } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
1041 hw_if->disable_rx_vlan_stripping(pdata);
1042 netdev_alert(netdev, "state change - rxvlan disabled\n");
1043 }
1044
1045 pdata->netdev_features = features;
1046
1047 DBGPR("<--xgbe_set_features\n");
1048
1049 return 0;
1050}
1051
1052static const struct net_device_ops xgbe_netdev_ops = {
1053 .ndo_open = xgbe_open,
1054 .ndo_stop = xgbe_close,
1055 .ndo_start_xmit = xgbe_xmit,
1056 .ndo_set_rx_mode = xgbe_set_rx_mode,
1057 .ndo_set_mac_address = xgbe_set_mac_address,
1058 .ndo_validate_addr = eth_validate_addr,
1059 .ndo_change_mtu = xgbe_change_mtu,
1060 .ndo_get_stats64 = xgbe_get_stats64,
1061#ifdef CONFIG_NET_POLL_CONTROLLER
1062 .ndo_poll_controller = xgbe_poll_controller,
1063#endif
1064 .ndo_set_features = xgbe_set_features,
1065};
1066
1067struct net_device_ops *xgbe_get_netdev_ops(void)
1068{
1069 return (struct net_device_ops *)&xgbe_netdev_ops;
1070}
1071
1072static int xgbe_tx_poll(struct xgbe_channel *channel)
1073{
1074 struct xgbe_prv_data *pdata = channel->pdata;
1075 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1076 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1077 struct xgbe_ring *ring = channel->tx_ring;
1078 struct xgbe_ring_data *rdata;
1079 struct xgbe_ring_desc *rdesc;
1080 struct net_device *netdev = pdata->netdev;
1081 unsigned long flags;
1082 int processed = 0;
1083
1084 DBGPR("-->xgbe_tx_poll\n");
1085
1086 /* Nothing to do if there isn't a Tx ring for this channel */
1087 if (!ring)
1088 return 0;
1089
1090 spin_lock_irqsave(&ring->lock, flags);
1091
1092 while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
1093 rdata = GET_DESC_DATA(ring, ring->dirty);
1094 rdesc = rdata->rdesc;
1095
1096 if (!hw_if->tx_complete(rdesc))
1097 break;
1098
1099#ifdef XGMAC_ENABLE_TX_DESC_DUMP
1100 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
1101#endif
1102
1103 /* Free the SKB and reset the descriptor for re-use */
1104 desc_if->unmap_skb(pdata, rdata);
1105 hw_if->tx_desc_reset(rdata);
1106
1107 processed++;
1108 ring->dirty++;
1109 }
1110
1111 if ((ring->tx.queue_stopped == 1) &&
1112 (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
1113 ring->tx.queue_stopped = 0;
1114 netif_wake_subqueue(netdev, channel->queue_index);
1115 }
1116
1117 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1118
1119 spin_unlock_irqrestore(&ring->lock, flags);
1120
1121 return processed;
1122}
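/*
 * Illustrative note: the queue is restarted only once more than
 * TX_DESC_MIN_FREE descriptors are free again, giving hysteresis so a
 * single completed frame does not bounce the queue between stopped and
 * started.
 */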
1123
1124static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1125{
1126 struct xgbe_prv_data *pdata = channel->pdata;
1127 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1128 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1129 struct xgbe_ring *ring = channel->rx_ring;
1130 struct xgbe_ring_data *rdata;
1131 struct xgbe_packet_data *packet;
1132 struct net_device *netdev = pdata->netdev;
1133 struct sk_buff *skb;
1134 unsigned int incomplete, error;
1135 unsigned int cur_len, put_len, max_len;
1136 int received = 0;
1137
1138 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1139
1140 /* Nothing to do if there isn't a Rx ring for this channel */
1141 if (!ring)
1142 return 0;
1143
1144 packet = &ring->packet_data;
1145 while (received < budget) {
1146 DBGPR(" cur = %d\n", ring->cur);
1147
1148 /* Clear the packet data information */
1149 memset(packet, 0, sizeof(*packet));
1150 skb = NULL;
1151 error = 0;
1152 cur_len = 0;
1153
1154read_again:
1155 rdata = GET_DESC_DATA(ring, ring->cur);
1156
1157 if (hw_if->dev_read(channel))
1158 break;
1159
1160 received++;
1161 ring->cur++;
1162 ring->dirty++;
1163
1164 dma_unmap_single(pdata->dev, rdata->skb_dma,
1165 rdata->skb_dma_len, DMA_FROM_DEVICE);
1166 rdata->skb_dma = 0;
1167
1168 incomplete = XGMAC_GET_BITS(packet->attributes,
1169 RX_PACKET_ATTRIBUTES,
1170 INCOMPLETE);
1171
1172 /* Earlier error, just drain the remaining data */
1173 if (incomplete && error)
1174 goto read_again;
1175
1176 if (error || packet->errors) {
1177 if (packet->errors)
1178 DBGPR("Error in received packet\n");
1179 dev_kfree_skb(skb);
1180 continue;
1181 }
1182
1183 put_len = rdata->len - cur_len;
1184 if (skb) {
1185 if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
1186 DBGPR("pskb_expand_head error\n");
1187 if (incomplete) {
1188 error = 1;
1189 goto read_again;
1190 }
1191
1192 dev_kfree_skb(skb);
1193 continue;
1194 }
1195 memcpy(skb_tail_pointer(skb), rdata->skb->data,
1196 put_len);
1197 } else {
1198 skb = rdata->skb;
1199 rdata->skb = NULL;
1200 }
1201 skb_put(skb, put_len);
1202 cur_len += put_len;
1203
1204 if (incomplete)
1205 goto read_again;
1206
1207 /* Be sure we don't exceed the configured MTU */
1208 max_len = netdev->mtu + ETH_HLEN;
1209 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1210 (skb->protocol == htons(ETH_P_8021Q)))
1211 max_len += VLAN_HLEN;
1212
1213 if (skb->len > max_len) {
1214 DBGPR("packet length exceeds configured MTU\n");
1215 dev_kfree_skb(skb);
1216 continue;
1217 }
1218
1219#ifdef XGMAC_ENABLE_RX_PKT_DUMP
1220 xgbe_print_pkt(netdev, skb, false);
1221#endif
1222
1223 skb_checksum_none_assert(skb);
1224 if (XGMAC_GET_BITS(packet->attributes,
1225 RX_PACKET_ATTRIBUTES, CSUM_DONE))
1226 skb->ip_summed = CHECKSUM_UNNECESSARY;
1227
1228 if (XGMAC_GET_BITS(packet->attributes,
1229 RX_PACKET_ATTRIBUTES, VLAN_CTAG))
1230 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1231 packet->vlan_ctag);
1232
1233 skb->dev = netdev;
1234 skb->protocol = eth_type_trans(skb, netdev);
1235 skb_record_rx_queue(skb, channel->queue_index);
1236 skb_mark_napi_id(skb, &pdata->napi);
1237
1238 netdev->last_rx = jiffies;
1239 napi_gro_receive(&pdata->napi, skb);
1240 }
1241
1242 if (received) {
1243 desc_if->realloc_skb(channel);
1244
1245		/* Update the Rx Tail Pointer Register with the address of
1246		 * the last cleaned entry */
1247 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
1248 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1249 lower_32_bits(rdata->rdesc_dma));
1250 }
1251
1252 DBGPR("<--xgbe_rx_poll: received = %d\n", received);
1253
1254 return received;
1255}
1256
1257static int xgbe_poll(struct napi_struct *napi, int budget)
1258{
1259 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
1260 napi);
1261 struct xgbe_channel *channel;
1262 int processed;
1263 unsigned int i;
1264
1265 DBGPR("-->xgbe_poll: budget=%d\n", budget);
1266
1267 /* Cleanup Tx ring first */
1268 channel = pdata->channel;
1269 for (i = 0; i < pdata->channel_count; i++, channel++)
1270 xgbe_tx_poll(channel);
1271
1272 /* Process Rx ring next */
1273 processed = 0;
1274 channel = pdata->channel;
1275 for (i = 0; i < pdata->channel_count; i++, channel++)
1276 processed += xgbe_rx_poll(channel, budget - processed);
1277
1278 /* If we processed everything, we are done */
1279 if (processed < budget) {
1280 /* Turn off polling */
1281 napi_complete(napi);
1282
1283 /* Enable Tx and Rx interrupts */
1284 xgbe_enable_rx_tx_ints(pdata);
1285 }
1286
1287 DBGPR("<--xgbe_poll: received = %d\n", processed);
1288
1289 return processed;
1290}
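/*
 * Illustrative note on the budget handling above: Rx rings are polled in
 * channel order and each later ring receives only whatever budget the
 * earlier ones left (budget - processed); interrupts are re-armed only
 * when the rings drained below the overall budget.
 */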
1291
1292void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
1293 unsigned int count, unsigned int flag)
1294{
1295 struct xgbe_ring_data *rdata;
1296 struct xgbe_ring_desc *rdesc;
1297
1298 while (count--) {
1299 rdata = GET_DESC_DATA(ring, idx);
1300 rdesc = rdata->rdesc;
1301 DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
1302 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
1303 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
1304 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
1305 idx++;
1306 }
1307}
1308
1309void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
1310 unsigned int idx)
1311{
1312 DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
1313 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
1314 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
1315}
1316
1317void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
1318{
1319 struct ethhdr *eth = (struct ethhdr *)skb->data;
1320 unsigned char *buf = skb->data;
1321 unsigned char buffer[128];
1322 unsigned int i, j;
1323
1324 netdev_alert(netdev, "\n************** SKB dump ****************\n");
1325
1326 netdev_alert(netdev, "%s packet of %d bytes\n",
1327 (tx_rx ? "TX" : "RX"), skb->len);
1328
1329 netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
1330 netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
1331 netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
1332
1333 for (i = 0, j = 0; i < skb->len;) {
1334 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
1335 buf[i++]);
1336
1337 if ((i % 32) == 0) {
1338 netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
1339 j = 0;
1340 } else if ((i % 16) == 0) {
1341 buffer[j++] = ' ';
1342 buffer[j++] = ' ';
1343 } else if ((i % 4) == 0) {
1344 buffer[j++] = ' ';
1345 }
1346 }
1347 if (i % 32)
1348 netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
1349
1350 netdev_alert(netdev, "\n************** SKB dump ****************\n");
1351}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 000000000000..8909f2b51af1
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,510 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/spinlock.h>
118#include <linux/phy.h>
119
120#include "xgbe.h"
121#include "xgbe-common.h"
122
123
124struct xgbe_stats {
125 char stat_string[ETH_GSTRING_LEN];
126 int stat_size;
127 int stat_offset;
128};
129
130#define XGMAC_MMC_STAT(_string, _var) \
131 { _string, \
132 FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
133 offsetof(struct xgbe_prv_data, mmc_stats._var), \
134 }
135
136static const struct xgbe_stats xgbe_gstring_stats[] = {
137 XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
138 XGMAC_MMC_STAT("tx_packets", txframecount_gb),
139 XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
140 XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
141 XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
142 XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
143 XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
144 XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
145 XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
146 XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
147 XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
148 XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
149 XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
150 XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
151
152 XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
153 XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
154 XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
155 XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
156 XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
157 XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
158 XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
159 XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
160 XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
161 XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
162 XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
163 XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
164 XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
165 XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
166 XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
167 XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
168 XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
169 XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
170 XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
171 XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
172 XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
173 XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
174};
175#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
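
The table above is the usual table-driven ethtool statistics idiom: each XGMAC_MMC_STAT entry records a user-visible name together with the size and offset of the corresponding counter inside xgbe_prv_data, so xgbe_get_ethtool_stats() below can copy every counter with one generic loop instead of naming each field. A self-contained illustration of the offsetof() technique, using hypothetical demo_* types:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct demo_stats {
        uint64_t tx_bytes;
        uint64_t rx_bytes;
};

struct demo_stat_desc {
        const char *name;
        size_t offset;          /* where the counter lives */
};

#define DEMO_STAT(_name, _field) \
        { _name, offsetof(struct demo_stats, _field) }

static const struct demo_stat_desc demo_descs[] = {
        DEMO_STAT("tx_bytes", tx_bytes),
        DEMO_STAT("rx_bytes", rx_bytes),
};

int main(void)
{
        struct demo_stats stats = { .tx_bytes = 1500, .rx_bytes = 9000 };
        const uint8_t *base = (const uint8_t *)&stats;
        size_t i;

        for (i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++) {
                uint64_t val;

                /* Generic copy: the name/offset pairs drive the loop */
                memcpy(&val, base + demo_descs[i].offset, sizeof(val));
                printf("%s = %llu\n", demo_descs[i].name,
                       (unsigned long long)val);
        }
        return 0;
}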
176
177static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
178{
179 int i;
180
181 DBGPR("-->%s\n", __func__);
182
183 switch (stringset) {
184 case ETH_SS_STATS:
185 for (i = 0; i < XGBE_STATS_COUNT; i++) {
186 memcpy(data, xgbe_gstring_stats[i].stat_string,
187 ETH_GSTRING_LEN);
188 data += ETH_GSTRING_LEN;
189 }
190 break;
191 }
192
193 DBGPR("<--%s\n", __func__);
194}
195
196static void xgbe_get_ethtool_stats(struct net_device *netdev,
197 struct ethtool_stats *stats, u64 *data)
198{
199 struct xgbe_prv_data *pdata = netdev_priv(netdev);
200 u8 *stat;
201 int i;
202
203 DBGPR("-->%s\n", __func__);
204
205 pdata->hw_if.read_mmc_stats(pdata);
206 for (i = 0; i < XGBE_STATS_COUNT; i++) {
207 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
208 *data++ = *(u64 *)stat;
209 }
210
211 DBGPR("<--%s\n", __func__);
212}
213
214static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
215{
216 int ret;
217
218 DBGPR("-->%s\n", __func__);
219
220 switch (stringset) {
221 case ETH_SS_STATS:
222 ret = XGBE_STATS_COUNT;
223 break;
224
225 default:
226 ret = -EOPNOTSUPP;
227 }
228
229 DBGPR("<--%s\n", __func__);
230
231 return ret;
232}
233
234static void xgbe_get_pauseparam(struct net_device *netdev,
235 struct ethtool_pauseparam *pause)
236{
237 struct xgbe_prv_data *pdata = netdev_priv(netdev);
238
239 DBGPR("-->xgbe_get_pauseparam\n");
240
241 pause->autoneg = pdata->pause_autoneg;
242 pause->tx_pause = pdata->tx_pause;
243 pause->rx_pause = pdata->rx_pause;
244
245 DBGPR("<--xgbe_get_pauseparam\n");
246}
247
248static int xgbe_set_pauseparam(struct net_device *netdev,
249 struct ethtool_pauseparam *pause)
250{
251 struct xgbe_prv_data *pdata = netdev_priv(netdev);
252 struct phy_device *phydev = pdata->phydev;
253 int ret = 0;
254
255 DBGPR("-->xgbe_set_pauseparam\n");
256
257 DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
258 pause->autoneg, pause->tx_pause, pause->rx_pause);
259
260 pdata->pause_autoneg = pause->autoneg;
261 if (pause->autoneg) {
262 phydev->advertising |= ADVERTISED_Pause;
263 phydev->advertising |= ADVERTISED_Asym_Pause;
264
265 } else {
266 phydev->advertising &= ~ADVERTISED_Pause;
267 phydev->advertising &= ~ADVERTISED_Asym_Pause;
268
269 pdata->tx_pause = pause->tx_pause;
270 pdata->rx_pause = pause->rx_pause;
271 }
272
273 if (netif_running(netdev))
274 ret = phy_start_aneg(phydev);
275
276 DBGPR("<--xgbe_set_pauseparam\n");
277
278 return ret;
279}
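
xgbe_set_pauseparam() above splits into two regimes: with autoneg enabled the driver only advertises symmetric and asymmetric pause and lets link negotiation decide the final tx/rx pause state (picked up later in xgbe_adjust_link() in xgbe-mdio.c), while with autoneg disabled the requested tx/rx settings take effect directly. A toy sketch of that decision, using hypothetical ADV_* bit values in place of the real ADVERTISED_* constants from the ethtool headers:

#include <stdio.h>

#define ADV_PAUSE       (1U << 0)       /* assumed bit values */
#define ADV_ASYM_PAUSE  (1U << 1)

int main(void)
{
        int autoneg;

        for (autoneg = 0; autoneg <= 1; autoneg++) {
                unsigned int advertising = 0;
                int tx_pause = 1, rx_pause = 1;   /* the user's request */

                if (autoneg) {
                        /* Advertise pause; the link partner decides */
                        advertising |= ADV_PAUSE | ADV_ASYM_PAUSE;
                } else {
                        /* Force the requested settings directly */
                        advertising &= ~(ADV_PAUSE | ADV_ASYM_PAUSE);
                }

                printf("autoneg=%d advertising=%#x tx=%d rx=%d\n",
                       autoneg, advertising, tx_pause, rx_pause);
        }
        return 0;
}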
280
281static int xgbe_get_settings(struct net_device *netdev,
282 struct ethtool_cmd *cmd)
283{
284 struct xgbe_prv_data *pdata = netdev_priv(netdev);
285 int ret;
286
287 DBGPR("-->xgbe_get_settings\n");
288
289 if (!pdata->phydev)
290 return -ENODEV;
291
292 spin_lock_irq(&pdata->lock);
293
294 ret = phy_ethtool_gset(pdata->phydev, cmd);
295 cmd->transceiver = XCVR_EXTERNAL;
296
297 spin_unlock_irq(&pdata->lock);
298
299 DBGPR("<--xgbe_get_settings\n");
300
301 return ret;
302}
303
304static int xgbe_set_settings(struct net_device *netdev,
305 struct ethtool_cmd *cmd)
306{
307 struct xgbe_prv_data *pdata = netdev_priv(netdev);
308 struct phy_device *phydev = pdata->phydev;
309 u32 speed;
310 int ret;
311
312 DBGPR("-->xgbe_set_settings\n");
313
314 if (!pdata->phydev)
315 return -ENODEV;
316
317 spin_lock_irq(&pdata->lock);
318
319 speed = ethtool_cmd_speed(cmd);
320
321 ret = -EINVAL;
322 if (cmd->phy_address != phydev->addr)
323 goto unlock;
324
325 if ((cmd->autoneg != AUTONEG_ENABLE) &&
326 (cmd->autoneg != AUTONEG_DISABLE))
327 goto unlock;
328
329 if ((cmd->autoneg == AUTONEG_DISABLE) &&
330 (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
331 (cmd->duplex != DUPLEX_FULL)))
332 goto unlock;
333
334 if (cmd->autoneg == AUTONEG_ENABLE) {
335 /* Clear settings needed to force speeds */
336 phydev->supported &= ~SUPPORTED_1000baseT_Full;
337 phydev->supported &= ~SUPPORTED_10000baseT_Full;
338 } else {
339 /* Add settings needed to force speed */
340 phydev->supported |= SUPPORTED_1000baseT_Full;
341 phydev->supported |= SUPPORTED_10000baseT_Full;
342 }
343
344 cmd->advertising &= phydev->supported;
345 if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
346 goto unlock;
347
348 ret = 0;
349 phydev->autoneg = cmd->autoneg;
350 phydev->speed = speed;
351 phydev->duplex = cmd->duplex;
352 phydev->advertising = cmd->advertising;
353
354 if (cmd->autoneg == AUTONEG_ENABLE)
355 phydev->advertising |= ADVERTISED_Autoneg;
356 else
357 phydev->advertising &= ~ADVERTISED_Autoneg;
358
359 if (netif_running(netdev))
360 ret = phy_start_aneg(phydev);
361
362unlock:
363 spin_unlock_irq(&pdata->lock);
364
365 DBGPR("<--xgbe_set_settings\n");
366
367 return ret;
368}
369
370static void xgbe_get_drvinfo(struct net_device *netdev,
371 struct ethtool_drvinfo *drvinfo)
372{
373 struct xgbe_prv_data *pdata = netdev_priv(netdev);
374
375 strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
376 strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
377 strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
378 sizeof(drvinfo->bus_info));
379 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
380 XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
381 XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
382 XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
383 drvinfo->n_stats = XGBE_STATS_COUNT;
384}
385
386static int xgbe_get_coalesce(struct net_device *netdev,
387 struct ethtool_coalesce *ec)
388{
389 struct xgbe_prv_data *pdata = netdev_priv(netdev);
390 struct xgbe_hw_if *hw_if = &pdata->hw_if;
391 unsigned int riwt;
392
393 DBGPR("-->xgbe_get_coalesce\n");
394
395 memset(ec, 0, sizeof(struct ethtool_coalesce));
396
397 riwt = pdata->rx_riwt;
398 ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
399 ec->rx_max_coalesced_frames = pdata->rx_frames;
400
401 ec->tx_coalesce_usecs = pdata->tx_usecs;
402 ec->tx_max_coalesced_frames = pdata->tx_frames;
403
404 DBGPR("<--xgbe_get_coalesce\n");
405
406 return 0;
407}
408
409static int xgbe_set_coalesce(struct net_device *netdev,
410 struct ethtool_coalesce *ec)
411{
412 struct xgbe_prv_data *pdata = netdev_priv(netdev);
413 struct xgbe_hw_if *hw_if = &pdata->hw_if;
414 unsigned int rx_frames, rx_riwt, rx_usecs;
415 unsigned int tx_frames, tx_usecs;
416
417 DBGPR("-->xgbe_set_coalesce\n");
418
419	/* Check for unsupported parameters */
420 if ((ec->rx_coalesce_usecs_irq) ||
421 (ec->rx_max_coalesced_frames_irq) ||
422 (ec->tx_coalesce_usecs_irq) ||
423 (ec->tx_max_coalesced_frames_irq) ||
424 (ec->stats_block_coalesce_usecs) ||
425 (ec->use_adaptive_rx_coalesce) ||
426 (ec->use_adaptive_tx_coalesce) ||
427 (ec->pkt_rate_low) ||
428 (ec->rx_coalesce_usecs_low) ||
429 (ec->rx_max_coalesced_frames_low) ||
430 (ec->tx_coalesce_usecs_low) ||
431 (ec->tx_max_coalesced_frames_low) ||
432 (ec->pkt_rate_high) ||
433 (ec->rx_coalesce_usecs_high) ||
434 (ec->rx_max_coalesced_frames_high) ||
435 (ec->tx_coalesce_usecs_high) ||
436 (ec->tx_max_coalesced_frames_high) ||
437 (ec->rate_sample_interval))
438 return -EOPNOTSUPP;
439
440 /* Can only change rx-frames when interface is down (see
441 * rx_descriptor_init in xgbe-dev.c)
442 */
443 rx_frames = pdata->rx_frames;
444 if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
445 netdev_alert(netdev,
446 "interface must be down to change rx-frames\n");
447 return -EINVAL;
448 }
449
450 rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
451 rx_frames = ec->rx_max_coalesced_frames;
452
453 /* Use smallest possible value if conversion resulted in zero */
454 if (ec->rx_coalesce_usecs && !rx_riwt)
455 rx_riwt = 1;
456
457 /* Check the bounds of values for Rx */
458 if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
459 rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
460 netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
461 rx_usecs);
462 return -EINVAL;
463 }
464 if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
465 netdev_alert(netdev, "rx-frames is limited to %d frames\n",
466 pdata->channel->rx_ring->rdesc_count);
467 return -EINVAL;
468 }
469
470 tx_usecs = ec->tx_coalesce_usecs;
471 tx_frames = ec->tx_max_coalesced_frames;
472
473 /* Check the bounds of values for Tx */
474 if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
475 netdev_alert(netdev, "tx-frames is limited to %d frames\n",
476 pdata->channel->tx_ring->rdesc_count);
477 return -EINVAL;
478 }
479
480 pdata->rx_riwt = rx_riwt;
481 pdata->rx_frames = rx_frames;
482 hw_if->config_rx_coalesce(pdata);
483
484 pdata->tx_usecs = tx_usecs;
485 pdata->tx_frames = tx_frames;
486 hw_if->config_tx_coalesce(pdata);
487
488 DBGPR("<--xgbe_set_coalesce\n");
489
490 return 0;
491}
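
The usec_to_riwt()/riwt_to_usec() helpers used above are provided by the hardware layer (xgbe-dev.c, which is not part of this hunk). As a rough model — assuming the receive interrupt watchdog counts in units of 256 DMA-clock cycles and using an assumed 250 MHz clock, neither of which comes from this hunk — the conversion and the round-to-zero clamp look like this:

#include <stdio.h>

#define RIWT_CYCLES_PER_UNIT    256U    /* assumed watchdog tick size */

static unsigned int usec_to_riwt(unsigned long clk_hz, unsigned int usec)
{
        return (usec * (unsigned int)(clk_hz / 1000000UL)) /
               RIWT_CYCLES_PER_UNIT;
}

static unsigned int riwt_to_usec(unsigned long clk_hz, unsigned int riwt)
{
        return (riwt * RIWT_CYCLES_PER_UNIT) /
               (unsigned int)(clk_hz / 1000000UL);
}

int main(void)
{
        unsigned long clk = 250000000UL;        /* assumed DMA clock */
        unsigned int riwt = usec_to_riwt(clk, 30);

        /* Mirror the driver's clamp: a non-zero request must not
         * round down to a watchdog value of zero. */
        if (riwt == 0)
                riwt = 1;

        printf("30 usecs -> riwt %u -> ~%u usecs\n",
               riwt, riwt_to_usec(clk, riwt));
        return 0;
}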
492
493static const struct ethtool_ops xgbe_ethtool_ops = {
494 .get_settings = xgbe_get_settings,
495 .set_settings = xgbe_set_settings,
496 .get_drvinfo = xgbe_get_drvinfo,
497 .get_link = ethtool_op_get_link,
498 .get_coalesce = xgbe_get_coalesce,
499 .set_coalesce = xgbe_set_coalesce,
500 .get_pauseparam = xgbe_get_pauseparam,
501 .set_pauseparam = xgbe_set_pauseparam,
502 .get_strings = xgbe_get_strings,
503 .get_ethtool_stats = xgbe_get_ethtool_stats,
504 .get_sset_count = xgbe_get_sset_count,
505};
506
507struct ethtool_ops *xgbe_get_ethtool_ops(void)
508{
509 return (struct ethtool_ops *)&xgbe_ethtool_ops;
510}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 000000000000..c83584a26713
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,512 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/module.h>
118#include <linux/device.h>
119#include <linux/platform_device.h>
120#include <linux/spinlock.h>
121#include <linux/netdevice.h>
122#include <linux/etherdevice.h>
123#include <linux/io.h>
124#include <linux/of.h>
125#include <linux/of_net.h>
126#include <linux/clk.h>
127
128#include "xgbe.h"
129#include "xgbe-common.h"
130
131
132MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
133MODULE_LICENSE("Dual BSD/GPL");
134MODULE_VERSION(XGBE_DRV_VERSION);
135MODULE_DESCRIPTION(XGBE_DRV_DESC);
136
137static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
138{
139 struct xgbe_channel *channel_mem, *channel;
140 struct xgbe_ring *tx_ring, *rx_ring;
141 unsigned int count, i;
142
143 DBGPR("-->xgbe_alloc_rings\n");
144
145 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
146
147 channel_mem = devm_kcalloc(pdata->dev, count,
148 sizeof(struct xgbe_channel), GFP_KERNEL);
149 if (!channel_mem)
150 return NULL;
151
152 tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
153 sizeof(struct xgbe_ring), GFP_KERNEL);
154 if (!tx_ring)
155 return NULL;
156
157 rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
158 sizeof(struct xgbe_ring), GFP_KERNEL);
159 if (!rx_ring)
160 return NULL;
161
162 for (i = 0, channel = channel_mem; i < count; i++, channel++) {
163 snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
164 channel->pdata = pdata;
165 channel->queue_index = i;
166 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
167 (DMA_CH_INC * i);
168
169 if (i < pdata->tx_ring_count) {
170 spin_lock_init(&tx_ring->lock);
171 channel->tx_ring = tx_ring++;
172 }
173
174 if (i < pdata->rx_ring_count) {
175			spin_lock_init(&rx_ring->lock);
176 channel->rx_ring = rx_ring++;
177 }
178
179 DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
180 channel->name, channel->queue_index, channel->dma_regs,
181 channel->tx_ring, channel->rx_ring);
182 }
183
184 pdata->channel_count = count;
185
186 DBGPR("<--xgbe_alloc_rings\n");
187
188 return channel_mem;
189}
190
191static void xgbe_default_config(struct xgbe_prv_data *pdata)
192{
193 DBGPR("-->xgbe_default_config\n");
194
195 pdata->pblx8 = DMA_PBL_X8_ENABLE;
196 pdata->tx_sf_mode = MTL_TSF_ENABLE;
197 pdata->tx_threshold = MTL_TX_THRESHOLD_64;
198 pdata->tx_pbl = DMA_PBL_16;
199 pdata->tx_osp_mode = DMA_OSP_ENABLE;
200 pdata->rx_sf_mode = MTL_RSF_DISABLE;
201 pdata->rx_threshold = MTL_RX_THRESHOLD_64;
202 pdata->rx_pbl = DMA_PBL_16;
203 pdata->pause_autoneg = 1;
204 pdata->tx_pause = 1;
205 pdata->rx_pause = 1;
206 pdata->power_down = 0;
207 pdata->default_autoneg = AUTONEG_ENABLE;
208 pdata->default_speed = SPEED_10000;
209
210 DBGPR("<--xgbe_default_config\n");
211}
212
213static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
214{
215 xgbe_init_function_ptrs_dev(&pdata->hw_if);
216 xgbe_init_function_ptrs_desc(&pdata->desc_if);
217}
218
219static int xgbe_probe(struct platform_device *pdev)
220{
221 struct xgbe_prv_data *pdata;
222 struct xgbe_hw_if *hw_if;
223 struct xgbe_desc_if *desc_if;
224 struct net_device *netdev;
225 struct device *dev = &pdev->dev;
226 struct resource *res;
227 const u8 *mac_addr;
228 int ret;
229
230 DBGPR("--> xgbe_probe\n");
231
232 netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
233 XGBE_MAX_DMA_CHANNELS);
234 if (!netdev) {
235 dev_err(dev, "alloc_etherdev failed\n");
236 ret = -ENOMEM;
237 goto err_alloc;
238 }
239 SET_NETDEV_DEV(netdev, dev);
240 pdata = netdev_priv(netdev);
241 pdata->netdev = netdev;
242 pdata->pdev = pdev;
243 pdata->dev = dev;
244 platform_set_drvdata(pdev, netdev);
245
246 spin_lock_init(&pdata->lock);
247 mutex_init(&pdata->xpcs_mutex);
248
249 /* Set and validate the number of descriptors for a ring */
250 BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
251 pdata->tx_desc_count = TX_DESC_CNT;
252 if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
253 dev_err(dev, "tx descriptor count (%d) is not valid\n",
254 pdata->tx_desc_count);
255 ret = -EINVAL;
256 goto err_io;
257 }
258 BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
259 pdata->rx_desc_count = RX_DESC_CNT;
260 if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
261 dev_err(dev, "rx descriptor count (%d) is not valid\n",
262 pdata->rx_desc_count);
263 ret = -EINVAL;
264 goto err_io;
265 }
266
267 /* Obtain the system clock setting */
268 pdata->sysclock = devm_clk_get(dev, NULL);
269 if (IS_ERR(pdata->sysclock)) {
270 dev_err(dev, "devm_clk_get failed\n");
271 ret = PTR_ERR(pdata->sysclock);
272 goto err_io;
273 }
274
275 /* Obtain the mmio areas for the device */
276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
277 pdata->xgmac_regs = devm_ioremap_resource(dev, res);
278 if (IS_ERR(pdata->xgmac_regs)) {
279 dev_err(dev, "xgmac ioremap failed\n");
280 ret = PTR_ERR(pdata->xgmac_regs);
281 goto err_io;
282 }
283 DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
284
285 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
286 pdata->xpcs_regs = devm_ioremap_resource(dev, res);
287 if (IS_ERR(pdata->xpcs_regs)) {
288 dev_err(dev, "xpcs ioremap failed\n");
289 ret = PTR_ERR(pdata->xpcs_regs);
290 goto err_io;
291 }
292 DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
293
294 /* Set the DMA mask */
295 if (!dev->dma_mask)
296 dev->dma_mask = &dev->coherent_dma_mask;
297 *(dev->dma_mask) = DMA_BIT_MASK(40);
298 dev->coherent_dma_mask = DMA_BIT_MASK(40);
299
300 ret = platform_get_irq(pdev, 0);
301 if (ret < 0) {
302 dev_err(dev, "platform_get_irq failed\n");
303 goto err_io;
304 }
305 netdev->irq = ret;
306 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
307
308 /* Set all the function pointers */
309 xgbe_init_all_fptrs(pdata);
310 hw_if = &pdata->hw_if;
311 desc_if = &pdata->desc_if;
312
313 /* Issue software reset to device */
314 hw_if->exit(pdata);
315
316 /* Populate the hardware features */
317 xgbe_get_all_hw_features(pdata);
318
319 /* Retrieve the MAC address */
320 mac_addr = of_get_mac_address(dev->of_node);
321 if (!mac_addr) {
322 dev_err(dev, "invalid mac address for this device\n");
323 ret = -EINVAL;
324 goto err_io;
325 }
326 memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
327
328 /* Retrieve the PHY mode - it must be "xgmii" */
329 pdata->phy_mode = of_get_phy_mode(dev->of_node);
330 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
331 dev_err(dev, "invalid phy-mode specified for this device\n");
332 ret = -EINVAL;
333 goto err_io;
334 }
335
336 /* Set default configuration data */
337 xgbe_default_config(pdata);
338
339 /* Calculate the number of Tx and Rx rings to be created */
340 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
341 pdata->hw_feat.tx_ch_cnt);
342	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
343	if (ret) {
343 dev_err(dev, "error setting real tx queue count\n");
344 goto err_io;
345 }
346
347 pdata->rx_ring_count = min_t(unsigned int,
348 netif_get_num_default_rss_queues(),
349 pdata->hw_feat.rx_ch_cnt);
350 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
351 if (ret) {
352 dev_err(dev, "error setting real rx queue count\n");
353 goto err_io;
354 }
355
356 /* Allocate the rings for the DMA channels */
357 pdata->channel = xgbe_alloc_rings(pdata);
358 if (!pdata->channel) {
359 dev_err(dev, "ring allocation failed\n");
360 ret = -ENOMEM;
361 goto err_io;
362 }
363
364	/* Prepare to register with MDIO */
365 pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
366 if (!pdata->mii_bus_id) {
367 dev_err(dev, "failed to allocate mii bus id\n");
368 ret = -ENOMEM;
369 goto err_io;
370 }
371 ret = xgbe_mdio_register(pdata);
372 if (ret)
373 goto err_bus_id;
374
375 /* Set network and ethtool operations */
376 netdev->netdev_ops = xgbe_get_netdev_ops();
377 netdev->ethtool_ops = xgbe_get_ethtool_ops();
378
379 /* Set device features */
380 netdev->hw_features = NETIF_F_SG |
381 NETIF_F_IP_CSUM |
382 NETIF_F_IPV6_CSUM |
383 NETIF_F_RXCSUM |
384 NETIF_F_TSO |
385 NETIF_F_TSO6 |
386 NETIF_F_GRO |
387 NETIF_F_HW_VLAN_CTAG_RX |
388 NETIF_F_HW_VLAN_CTAG_TX;
389
390 netdev->vlan_features |= NETIF_F_SG |
391 NETIF_F_IP_CSUM |
392 NETIF_F_IPV6_CSUM |
393 NETIF_F_TSO |
394 NETIF_F_TSO6;
395
396 netdev->features |= netdev->hw_features;
397 pdata->netdev_features = netdev->features;
398
399 xgbe_init_rx_coalesce(pdata);
400 xgbe_init_tx_coalesce(pdata);
401
402 netif_carrier_off(netdev);
403 ret = register_netdev(netdev);
404 if (ret) {
405 dev_err(dev, "net device registration failed\n");
406 goto err_reg_netdev;
407 }
408
409 xgbe_debugfs_init(pdata);
410
411 netdev_notice(netdev, "net device enabled\n");
412
413 DBGPR("<-- xgbe_probe\n");
414
415 return 0;
416
417err_reg_netdev:
418 xgbe_mdio_unregister(pdata);
419
420err_bus_id:
421 kfree(pdata->mii_bus_id);
422
423err_io:
424 free_netdev(netdev);
425
426err_alloc:
427 dev_notice(dev, "net device not enabled\n");
428
429 return ret;
430}
431
432static int xgbe_remove(struct platform_device *pdev)
433{
434 struct net_device *netdev = platform_get_drvdata(pdev);
435 struct xgbe_prv_data *pdata = netdev_priv(netdev);
436
437 DBGPR("-->xgbe_remove\n");
438
439 xgbe_debugfs_exit(pdata);
440
441 unregister_netdev(netdev);
442
443 xgbe_mdio_unregister(pdata);
444
445 kfree(pdata->mii_bus_id);
446
447 free_netdev(netdev);
448
449 DBGPR("<--xgbe_remove\n");
450
451 return 0;
452}
453
454#ifdef CONFIG_PM
455static int xgbe_suspend(struct device *dev)
456{
457 struct net_device *netdev = dev_get_drvdata(dev);
458 int ret;
459
460 DBGPR("-->xgbe_suspend\n");
461
462 if (!netif_running(netdev)) {
463		DBGPR("<--xgbe_suspend\n");
464 return -EINVAL;
465 }
466
467 ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
468
469 DBGPR("<--xgbe_suspend\n");
470
471 return ret;
472}
473
474static int xgbe_resume(struct device *dev)
475{
476 struct net_device *netdev = dev_get_drvdata(dev);
477 int ret;
478
479 DBGPR("-->xgbe_resume\n");
480
481 if (!netif_running(netdev)) {
482		DBGPR("<--xgbe_resume\n");
483 return -EINVAL;
484 }
485
486 ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
487
488 DBGPR("<--xgbe_resume\n");
489
490 return ret;
491}
492#endif /* CONFIG_PM */
493
494static const struct of_device_id xgbe_of_match[] = {
495 { .compatible = "amd,xgbe-seattle-v1a", },
496 {},
497};
498
499MODULE_DEVICE_TABLE(of, xgbe_of_match);
500static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
501
502static struct platform_driver xgbe_driver = {
503 .driver = {
504 .name = "amd-xgbe",
505 .of_match_table = xgbe_of_match,
506 .pm = &xgbe_pm_ops,
507 },
508 .probe = xgbe_probe,
509 .remove = xgbe_remove,
510};
511
512module_platform_driver(xgbe_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 000000000000..ea7a5d6750ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,433 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include <linux/module.h>
118#include <linux/kmod.h>
119#include <linux/spinlock.h>
120#include <linux/mdio.h>
121#include <linux/phy.h>
122#include <linux/of.h>
123
124#include "xgbe.h"
125#include "xgbe-common.h"
126
127
128static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
129{
130 struct xgbe_prv_data *pdata = mii->priv;
131 struct xgbe_hw_if *hw_if = &pdata->hw_if;
132 int mmd_data;
133
134 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
135 prtad, mmd_reg);
136
137 mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
138
139 DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
140
141 return mmd_data;
142}
143
144static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
145 u16 mmd_val)
146{
147 struct xgbe_prv_data *pdata = mii->priv;
148 struct xgbe_hw_if *hw_if = &pdata->hw_if;
149 int mmd_data = mmd_val;
150
151 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
152 prtad, mmd_reg, mmd_data);
153
154 hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
155
156 DBGPR_MDIO("<--xgbe_mdio_write\n");
157
158 return 0;
159}
160
161static void xgbe_adjust_link(struct net_device *netdev)
162{
163 struct xgbe_prv_data *pdata = netdev_priv(netdev);
164 struct xgbe_hw_if *hw_if = &pdata->hw_if;
165 struct phy_device *phydev = pdata->phydev;
166 unsigned long flags;
167 int new_state = 0;
168
169 if (phydev == NULL)
170 return;
171
172 DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
173 phydev->addr, phydev->link, pdata->phy_link);
174
175 spin_lock_irqsave(&pdata->lock, flags);
176
177 if (phydev->link) {
178 /* Flow control support */
179 if (pdata->pause_autoneg) {
180 if (phydev->pause || phydev->asym_pause) {
181 pdata->tx_pause = 1;
182 pdata->rx_pause = 1;
183 } else {
184 pdata->tx_pause = 0;
185 pdata->rx_pause = 0;
186 }
187 }
188
189 if (pdata->tx_pause != pdata->phy_tx_pause) {
190 hw_if->config_tx_flow_control(pdata);
191 pdata->phy_tx_pause = pdata->tx_pause;
192 }
193
194 if (pdata->rx_pause != pdata->phy_rx_pause) {
195 hw_if->config_rx_flow_control(pdata);
196 pdata->phy_rx_pause = pdata->rx_pause;
197 }
198
199 /* Speed support */
200 if (phydev->speed != pdata->phy_speed) {
201 new_state = 1;
202
203 switch (phydev->speed) {
204 case SPEED_10000:
205 hw_if->set_xgmii_speed(pdata);
206 break;
207
208 case SPEED_2500:
209 hw_if->set_gmii_2500_speed(pdata);
210 break;
211
212 case SPEED_1000:
213 hw_if->set_gmii_speed(pdata);
214 break;
215 }
216 pdata->phy_speed = phydev->speed;
217 }
218
219 if (phydev->link != pdata->phy_link) {
220 new_state = 1;
221 pdata->phy_link = 1;
222 }
223 } else if (pdata->phy_link) {
224 new_state = 1;
225 pdata->phy_link = 0;
226 pdata->phy_speed = SPEED_UNKNOWN;
227 }
228
229 if (new_state)
230 phy_print_status(phydev);
231
232 spin_unlock_irqrestore(&pdata->lock, flags);
233
234 DBGPR_MDIO("<--xgbe_adjust_link\n");
235}
236
237void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
238{
239 struct device *dev = pdata->dev;
240 struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
241 int i;
242
243 dev_alert(dev, "\n************* PHY Reg dump **********************\n");
244
245 dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
246 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
247 dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
248 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
249 dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
250 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
251 dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
252 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
253 dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
254 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
255 dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
256 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
257
258 dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
259 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
260 dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
261 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
262 dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
263 MDIO_AN_ADVERTISE,
264 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
265 dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
266 MDIO_AN_ADVERTISE + 1,
267 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
268 dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
269 MDIO_AN_ADVERTISE + 2,
270 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
271 dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
272 MDIO_AN_COMP_STAT,
273 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
274
275 dev_alert(dev, "MMD Device Mask = %#x\n",
276 phydev->c45_ids.devices_in_package);
277 for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
278 dev_alert(dev, " MMD %d: ID = %#08x\n", i,
279 phydev->c45_ids.device_ids[i]);
280
281 dev_alert(dev, "\n*************************************************\n");
282}
283
284int xgbe_mdio_register(struct xgbe_prv_data *pdata)
285{
286 struct net_device *netdev = pdata->netdev;
287 struct device_node *phy_node;
288 struct mii_bus *mii;
289 struct phy_device *phydev;
290 int ret = 0;
291
292 DBGPR("-->xgbe_mdio_register\n");
293
294 /* Retrieve the phy-handle */
295 phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
296 if (!phy_node) {
297 dev_err(pdata->dev, "unable to parse phy-handle\n");
298 return -EINVAL;
299 }
300
301 /* Register with the MDIO bus */
302 mii = mdiobus_alloc();
303 if (mii == NULL) {
304 dev_err(pdata->dev, "mdiobus_alloc failed\n");
305 ret = -ENOMEM;
306 goto err_node_get;
307 }
308
309 /* Register on the MDIO bus (don't probe any PHYs) */
310 mii->name = XGBE_PHY_NAME;
311 mii->read = xgbe_mdio_read;
312 mii->write = xgbe_mdio_write;
313 snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
314 mii->priv = pdata;
315 mii->phy_mask = ~0;
316 mii->parent = pdata->dev;
317 ret = mdiobus_register(mii);
318 if (ret) {
319 dev_err(pdata->dev, "mdiobus_register failed\n");
320 goto err_mdiobus_alloc;
321 }
322 DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
323
324 /* Probe the PCS using Clause 45 */
325 phydev = get_phy_device(mii, XGBE_PRTAD, true);
326 if (IS_ERR(phydev) || !phydev ||
327 !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
328 dev_err(pdata->dev, "get_phy_device failed\n");
329		ret = IS_ERR(phydev) ? PTR_ERR(phydev) : -ENOLINK;
330 goto err_mdiobus_register;
331 }
332 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
333 MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
334
335 of_node_get(phy_node);
336 phydev->dev.of_node = phy_node;
337 ret = phy_device_register(phydev);
338 if (ret) {
339 dev_err(pdata->dev, "phy_device_register failed\n");
340 of_node_put(phy_node);
341 goto err_phy_device;
342 }
343
344 /* Add a reference to the PHY driver so it can't be unloaded */
345 pdata->phy_module = phydev->dev.driver ?
346 phydev->dev.driver->owner : NULL;
347 if (!try_module_get(pdata->phy_module)) {
348 dev_err(pdata->dev, "try_module_get failed\n");
349 ret = -EIO;
350 goto err_phy_device;
351 }
352
353 pdata->mii = mii;
354 pdata->mdio_mmd = MDIO_MMD_PCS;
355
356 pdata->phy_link = -1;
357 pdata->phy_speed = SPEED_UNKNOWN;
358 pdata->phy_tx_pause = pdata->tx_pause;
359 pdata->phy_rx_pause = pdata->rx_pause;
360
361 ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
362 pdata->phy_mode);
363 if (ret) {
364 netdev_err(netdev, "phy_connect_direct failed\n");
365 goto err_phy_device;
366 }
367
368 if (!phydev->drv || (phydev->drv->phy_id == 0)) {
369 netdev_err(netdev, "phy_id not valid\n");
370 ret = -ENODEV;
371 goto err_phy_connect;
372 }
373 DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
374 dev_name(&phydev->dev), phydev->link);
375
376 phydev->autoneg = pdata->default_autoneg;
377 if (phydev->autoneg == AUTONEG_DISABLE) {
378 /* Add settings needed to force speed */
379 phydev->supported |= SUPPORTED_1000baseT_Full;
380 phydev->supported |= SUPPORTED_10000baseT_Full;
381
382 phydev->speed = pdata->default_speed;
383 phydev->duplex = DUPLEX_FULL;
384
385 phydev->advertising &= ~ADVERTISED_Autoneg;
386 }
387
388 pdata->phydev = phydev;
389
390 of_node_put(phy_node);
391
392 DBGPHY_REGS(pdata);
393
394 DBGPR("<--xgbe_mdio_register\n");
395
396 return 0;
397
398err_phy_connect:
399 phy_disconnect(phydev);
400
401err_phy_device:
402 phy_device_free(phydev);
403
404err_mdiobus_register:
405 mdiobus_unregister(mii);
406
407err_mdiobus_alloc:
408 mdiobus_free(mii);
409
410err_node_get:
411 of_node_put(phy_node);
412
413 return ret;
414}
415
416void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
417{
418 DBGPR("-->xgbe_mdio_unregister\n");
419
420 phy_disconnect(pdata->phydev);
421 pdata->phydev = NULL;
422
423 module_put(pdata->phy_module);
424 pdata->phy_module = NULL;
425
426 mdiobus_unregister(pdata->mii);
427 pdata->mii->priv = NULL;
428
429 mdiobus_free(pdata->mii);
430 pdata->mii = NULL;
431
432 DBGPR("<--xgbe_mdio_unregister\n");
433}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 000000000000..ab0627162c01
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,676 @@
1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#ifndef __XGBE_H__
118#define __XGBE_H__
119
120#include <linux/dma-mapping.h>
121#include <linux/netdevice.h>
122#include <linux/workqueue.h>
123#include <linux/phy.h>
124
125
126#define XGBE_DRV_NAME "amd-xgbe"
127#define XGBE_DRV_VERSION "1.0.0-a"
128#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
129
130/* Descriptor related defines */
131#define TX_DESC_CNT 512
132#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3)
133#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1)
134#define RX_DESC_CNT 512
135
136#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
137
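/* Illustrative note (not part of the driver): 0x3fff is the 14-bit
 * descriptor length limit (16383); masking with ~(64 - 1) rounds down
 * to 16320, the largest 64-byte-aligned size that fits the field.
 */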
138#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
139#define RX_BUF_ALIGN 64
140
141#define XGBE_MAX_DMA_CHANNELS 16
142#define DMA_ARDOMAIN_SETTING 0x2
143#define DMA_ARCACHE_SETTING 0xb
144#define DMA_AWDOMAIN_SETTING 0x2
145#define DMA_AWCACHE_SETTING 0x7
146#define DMA_INTERRUPT_MASK 0x31c7
147
148#define XGMAC_MIN_PACKET 60
149#define XGMAC_STD_PACKET_MTU 1500
150#define XGMAC_MAX_STD_PACKET 1518
151#define XGMAC_JUMBO_PACKET_MTU 9000
152#define XGMAC_MAX_JUMBO_PACKET 9018
153
154#define MAX_MULTICAST_LIST 14
155#define TX_FLAGS_IP_PKT 0x00000001
156#define TX_FLAGS_TCP_PKT 0x00000002
157
158/* MDIO bus phy name */
159#define XGBE_PHY_NAME "amd_xgbe_phy"
160#define XGBE_PRTAD 0
161
162/* Driver PMT macros */
163#define XGMAC_DRIVER_CONTEXT 1
164#define XGMAC_IOCTL_CONTEXT 2
165
166#define FIFO_SIZE_B(x) (x)
167#define FIFO_SIZE_KB(x)		((x) * 1024)
168
169#define XGBE_TC_CNT 2
170
171/* Helper macro for descriptor handling
172 * Always use GET_DESC_DATA to access the descriptor data
173 * since the index is free-running and needs to be ANDed
174 * with the descriptor count value of the ring to index to
175 * the proper descriptor data.
176 */
177#define GET_DESC_DATA(_ring, _idx) \
178 ((_ring)->rdata + \
179 ((_idx) & ((_ring)->rdesc_count - 1)))
180
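/* Illustrative sketch (not part of the driver): with rdesc_count = 512,
 * GET_DESC_DATA(ring, 513) and GET_DESC_DATA(ring, 1) return the same
 * entry, since 513 & (512 - 1) == 1. The mask trick only works because
 * the descriptor counts are powers of two (512 above).
 */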
181
182/* Default coalescing parameters */
183#define XGMAC_INIT_DMA_TX_USECS 100
184#define XGMAC_INIT_DMA_TX_FRAMES 16
185
186#define XGMAC_MAX_DMA_RIWT 0xff
187#define XGMAC_INIT_DMA_RX_USECS 100
188#define XGMAC_INIT_DMA_RX_FRAMES 16
189
190/* Flow control queue count */
191#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
192
193
194struct xgbe_prv_data;
195
196struct xgbe_packet_data {
197 unsigned int attributes;
198
199 unsigned int errors;
200
201 unsigned int rdesc_count;
202 unsigned int length;
203
204 unsigned int header_len;
205 unsigned int tcp_header_len;
206 unsigned int tcp_payload_len;
207 unsigned short mss;
208
209 unsigned short vlan_ctag;
210};
211
212/* Common Rx and Tx descriptor mapping */
213struct xgbe_ring_desc {
214 unsigned int desc0;
215 unsigned int desc1;
216 unsigned int desc2;
217 unsigned int desc3;
218};
219
220/* Structure used to hold information related to the descriptor
221 * and the packet associated with the descriptor (always use
222 * the GET_DESC_DATA macro to access this data from the ring)
223 */
224struct xgbe_ring_data {
225 struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
226 dma_addr_t rdesc_dma; /* DMA address of descriptor */
227
228 struct sk_buff *skb; /* Virtual address of SKB */
229 dma_addr_t skb_dma; /* DMA address of SKB data */
230 unsigned int skb_dma_len; /* Length of SKB DMA area */
231 unsigned int tso_header; /* TSO header indicator */
232
233 unsigned short len; /* Length of received Rx packet */
234
235 unsigned int interrupt; /* Interrupt indicator */
236
237 unsigned int mapped_as_page;
238};
239
240struct xgbe_ring {
241 /* Ring lock - used just for TX rings at the moment */
242 spinlock_t lock;
243
244 /* Per packet related information */
245 struct xgbe_packet_data packet_data;
246
247 /* Virtual/DMA addresses and count of allocated descriptor memory */
248 struct xgbe_ring_desc *rdesc;
249 dma_addr_t rdesc_dma;
250 unsigned int rdesc_count;
251
252	/* Array of descriptor data corresponding to the descriptor memory
253 * (always use the GET_DESC_DATA macro to access this data)
254 */
255 struct xgbe_ring_data *rdata;
256
257 /* Ring index values
258 * cur - Tx: index of descriptor to be used for current transfer
259 * Rx: index of descriptor to check for packet availability
260 * dirty - Tx: index of descriptor to check for transfer complete
261 * Rx: count of descriptors in which a packet has been received
262 * (used with skb_realloc_index to refresh the ring)
263 */
264 unsigned int cur;
265 unsigned int dirty;
266
267 /* Coalesce frame count used for interrupt bit setting */
268 unsigned int coalesce_count;
269
270 union {
271 struct {
272 unsigned int queue_stopped;
273 unsigned short cur_mss;
274 unsigned short cur_vlan_ctag;
275 } tx;
276
277 struct {
278 unsigned int realloc_index;
279 unsigned int realloc_threshold;
280 } rx;
281 };
282} ____cacheline_aligned;
283
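/* Illustrative sketch (helper not in the driver): because cur and dirty
 * are free-running unsigned counters, their difference is the number of
 * submitted-but-unreclaimed Tx descriptors, even across wrap-around.
 */
static inline unsigned int xgbe_example_tx_in_flight(struct xgbe_ring *ring)
{
	return ring->cur - ring->dirty;
}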
284/* Structure used to describe the descriptor rings associated with
285 * a DMA channel.
286 */
287struct xgbe_channel {
288 char name[16];
289
290 /* Address of private data area for device */
291 struct xgbe_prv_data *pdata;
292
293 /* Queue index and base address of queue's DMA registers */
294 unsigned int queue_index;
295 void __iomem *dma_regs;
296
297 unsigned int saved_ier;
298
299 unsigned int tx_timer_active;
300 struct hrtimer tx_timer;
301
302 struct xgbe_ring *tx_ring;
303 struct xgbe_ring *rx_ring;
304} ____cacheline_aligned;
305
306enum xgbe_int {
307 XGMAC_INT_DMA_ISR_DC0IS,
308 XGMAC_INT_DMA_CH_SR_TI,
309 XGMAC_INT_DMA_CH_SR_TPS,
310 XGMAC_INT_DMA_CH_SR_TBU,
311 XGMAC_INT_DMA_CH_SR_RI,
312 XGMAC_INT_DMA_CH_SR_RBU,
313 XGMAC_INT_DMA_CH_SR_RPS,
314 XGMAC_INT_DMA_CH_SR_FBE,
315 XGMAC_INT_DMA_ALL,
316};
317
318enum xgbe_int_state {
319 XGMAC_INT_STATE_SAVE,
320 XGMAC_INT_STATE_RESTORE,
321};
322
323enum xgbe_mtl_fifo_size {
324 XGMAC_MTL_FIFO_SIZE_256 = 0x00,
325 XGMAC_MTL_FIFO_SIZE_512 = 0x01,
326 XGMAC_MTL_FIFO_SIZE_1K = 0x03,
327 XGMAC_MTL_FIFO_SIZE_2K = 0x07,
328 XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
329 XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
330 XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
331 XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
332 XGMAC_MTL_FIFO_SIZE_64K = 0xff,
333 XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
334 XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
335};
336
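/* Illustrative sketch (helper not in the driver): every enumerator above
 * follows value = (size_in_bytes / 256) - 1, e.g. 4096 / 256 - 1 = 0x0f.
 */
static inline unsigned int xgbe_example_fifo_encode(unsigned int bytes)
{
	return (bytes / 256) - 1;	/* 256 -> 0x00, ..., 256K -> 0x3ff */
}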
337struct xgbe_mmc_stats {
338 /* Tx Stats */
339 u64 txoctetcount_gb;
340 u64 txframecount_gb;
341 u64 txbroadcastframes_g;
342 u64 txmulticastframes_g;
343 u64 tx64octets_gb;
344 u64 tx65to127octets_gb;
345 u64 tx128to255octets_gb;
346 u64 tx256to511octets_gb;
347 u64 tx512to1023octets_gb;
348 u64 tx1024tomaxoctets_gb;
349 u64 txunicastframes_gb;
350 u64 txmulticastframes_gb;
351 u64 txbroadcastframes_gb;
352 u64 txunderflowerror;
353 u64 txoctetcount_g;
354 u64 txframecount_g;
355 u64 txpauseframes;
356 u64 txvlanframes_g;
357
358 /* Rx Stats */
359 u64 rxframecount_gb;
360 u64 rxoctetcount_gb;
361 u64 rxoctetcount_g;
362 u64 rxbroadcastframes_g;
363 u64 rxmulticastframes_g;
364 u64 rxcrcerror;
365 u64 rxrunterror;
366 u64 rxjabbererror;
367 u64 rxundersize_g;
368 u64 rxoversize_g;
369 u64 rx64octets_gb;
370 u64 rx65to127octets_gb;
371 u64 rx128to255octets_gb;
372 u64 rx256to511octets_gb;
373 u64 rx512to1023octets_gb;
374 u64 rx1024tomaxoctets_gb;
375 u64 rxunicastframes_g;
376 u64 rxlengtherror;
377 u64 rxoutofrangetype;
378 u64 rxpauseframes;
379 u64 rxfifooverflow;
380 u64 rxvlanframes_gb;
381 u64 rxwatchdogerror;
382};
383
384struct xgbe_hw_if {
385 int (*tx_complete)(struct xgbe_ring_desc *);
386
387 int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
388 int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
389 int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
390 int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
391
392 int (*enable_rx_csum)(struct xgbe_prv_data *);
393 int (*disable_rx_csum)(struct xgbe_prv_data *);
394
395 int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
396 int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
397
398 int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
399 void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
400 int (*set_gmii_speed)(struct xgbe_prv_data *);
401 int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
402 int (*set_xgmii_speed)(struct xgbe_prv_data *);
403
404 void (*enable_tx)(struct xgbe_prv_data *);
405 void (*disable_tx)(struct xgbe_prv_data *);
406 void (*enable_rx)(struct xgbe_prv_data *);
407 void (*disable_rx)(struct xgbe_prv_data *);
408
409 void (*powerup_tx)(struct xgbe_prv_data *);
410 void (*powerdown_tx)(struct xgbe_prv_data *);
411 void (*powerup_rx)(struct xgbe_prv_data *);
412 void (*powerdown_rx)(struct xgbe_prv_data *);
413
414 int (*init)(struct xgbe_prv_data *);
415 int (*exit)(struct xgbe_prv_data *);
416
417 int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
418 int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
419 void (*pre_xmit)(struct xgbe_channel *);
420 int (*dev_read)(struct xgbe_channel *);
421 void (*tx_desc_init)(struct xgbe_channel *);
422 void (*rx_desc_init)(struct xgbe_channel *);
423 void (*rx_desc_reset)(struct xgbe_ring_data *);
424 void (*tx_desc_reset)(struct xgbe_ring_data *);
425 int (*is_last_desc)(struct xgbe_ring_desc *);
426 int (*is_context_desc)(struct xgbe_ring_desc *);
427
428 /* For FLOW ctrl */
429 int (*config_tx_flow_control)(struct xgbe_prv_data *);
430 int (*config_rx_flow_control)(struct xgbe_prv_data *);
431
432 /* For RX coalescing */
433 int (*config_rx_coalesce)(struct xgbe_prv_data *);
434 int (*config_tx_coalesce)(struct xgbe_prv_data *);
435 unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
436 unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
437
438 /* For RX and TX threshold config */
439 int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
440 int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
441
442 /* For RX and TX Store and Forward Mode config */
443 int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
444 int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
445
446 /* For TX DMA Operate on Second Frame config */
447 int (*config_osp_mode)(struct xgbe_prv_data *);
448
449 /* For RX and TX PBL config */
450 int (*config_rx_pbl_val)(struct xgbe_prv_data *);
451 int (*get_rx_pbl_val)(struct xgbe_prv_data *);
452 int (*config_tx_pbl_val)(struct xgbe_prv_data *);
453 int (*get_tx_pbl_val)(struct xgbe_prv_data *);
454 int (*config_pblx8)(struct xgbe_prv_data *);
455
456 /* For MMC statistics */
457 void (*rx_mmc_int)(struct xgbe_prv_data *);
458 void (*tx_mmc_int)(struct xgbe_prv_data *);
459 void (*read_mmc_stats)(struct xgbe_prv_data *);
460};
461
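/* Illustrative note (hypothetical call site, not from this file): hw_if
 * is an ops table populated by xgbe_init_function_ptrs_dev(); callers
 * reach the hardware through it rather than calling routines directly:
 *
 *	pdata->hw_if.enable_tx(pdata);
 *	pdata->hw_if.enable_rx(pdata);
 */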
462struct xgbe_desc_if {
463 int (*alloc_ring_resources)(struct xgbe_prv_data *);
464 void (*free_ring_resources)(struct xgbe_prv_data *);
465 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
466 void (*realloc_skb)(struct xgbe_channel *);
467 void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
468 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
469 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
470};
471
472/* This structure contains flags that indicate what hardware features
473 * or configurations are present in the device.
474 */
475struct xgbe_hw_features {
476 /* HW Feature Register0 */
477 unsigned int gmii; /* 1000 Mbps support */
478 unsigned int vlhash; /* VLAN Hash Filter */
479 unsigned int sma; /* SMA(MDIO) Interface */
480 unsigned int rwk; /* PMT remote wake-up packet */
481 unsigned int mgk; /* PMT magic packet */
482 unsigned int mmc; /* RMON module */
483 unsigned int aoe; /* ARP Offload */
484	unsigned int ts;		/* IEEE 1588-2008 Advanced Timestamp */
485 unsigned int eee; /* Energy Efficient Ethernet */
486 unsigned int tx_coe; /* Tx Checksum Offload */
487 unsigned int rx_coe; /* Rx Checksum Offload */
488 unsigned int addn_mac; /* Additional MAC Addresses */
489 unsigned int ts_src; /* Timestamp Source */
490 unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
491
492 /* HW Feature Register1 */
493 unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
494 unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
495	unsigned int adv_ts_hi;	/* Advanced Timestamping High Word */
496 unsigned int dcb; /* DCB Feature */
497 unsigned int sph; /* Split Header Feature */
498 unsigned int tso; /* TCP Segmentation Offload */
499 unsigned int dma_debug; /* DMA Debug Registers */
500 unsigned int rss; /* Receive Side Scaling */
501 unsigned int hash_table_size; /* Hash Table Size */
502 unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
503
504 /* HW Feature Register2 */
505 unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
506 unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
507 unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
508 unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
509 unsigned int pps_out_num; /* Number of PPS outputs */
510 unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
511};
512
513struct xgbe_prv_data {
514 struct net_device *netdev;
515 struct platform_device *pdev;
516 struct device *dev;
517
518 /* XGMAC/XPCS related mmio registers */
519 void __iomem *xgmac_regs; /* XGMAC CSRs */
520 void __iomem *xpcs_regs; /* XPCS MMD registers */
521
522 /* Overall device lock */
523 spinlock_t lock;
524
525 /* XPCS indirect addressing mutex */
526 struct mutex xpcs_mutex;
527
528 int irq_number;
529
530 struct xgbe_hw_if hw_if;
531 struct xgbe_desc_if desc_if;
532
533 /* Rings for Tx/Rx on a DMA channel */
534 struct xgbe_channel *channel;
535 unsigned int channel_count;
536 unsigned int tx_ring_count;
537 unsigned int tx_desc_count;
538 unsigned int rx_ring_count;
539 unsigned int rx_desc_count;
540
541 /* Tx/Rx common settings */
542 unsigned int pblx8;
543
544 /* Tx settings */
545 unsigned int tx_sf_mode;
546 unsigned int tx_threshold;
547 unsigned int tx_pbl;
548 unsigned int tx_osp_mode;
549
550 /* Rx settings */
551 unsigned int rx_sf_mode;
552 unsigned int rx_threshold;
553 unsigned int rx_pbl;
554
555 /* Tx coalescing settings */
556 unsigned int tx_usecs;
557 unsigned int tx_frames;
558
559 /* Rx coalescing settings */
560 unsigned int rx_riwt;
561 unsigned int rx_frames;
562
563	/* Rx buffer size for the current MTU */
564 unsigned int rx_buf_size;
565
566 /* Flow control settings */
567 unsigned int pause_autoneg;
568 unsigned int tx_pause;
569 unsigned int rx_pause;
570
571 /* MDIO settings */
572 struct module *phy_module;
573 char *mii_bus_id;
574 struct mii_bus *mii;
575 int mdio_mmd;
576 struct phy_device *phydev;
577 int default_autoneg;
578 int default_speed;
579
580 /* Current PHY settings */
581 phy_interface_t phy_mode;
582 int phy_link;
583 int phy_speed;
584 unsigned int phy_tx_pause;
585 unsigned int phy_rx_pause;
586
587 /* Netdev related settings */
588 netdev_features_t netdev_features;
589 struct napi_struct napi;
590 struct xgbe_mmc_stats mmc_stats;
591
592 /* System clock value used for Rx watchdog */
593 struct clk *sysclock;
594
595 /* Hardware features of the device */
596 struct xgbe_hw_features hw_feat;
597
598 /* Device restart work structure */
599 struct work_struct restart_work;
600
601 /* Keeps track of power mode */
602 unsigned int power_down;
603
604#ifdef CONFIG_DEBUG_FS
605 struct dentry *xgbe_debugfs;
606
607 unsigned int debugfs_xgmac_reg;
608
609 unsigned int debugfs_xpcs_mmd;
610 unsigned int debugfs_xpcs_reg;
611#endif
612};
613
614/* Function prototypes */
615
616void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
617void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
618struct net_device_ops *xgbe_get_netdev_ops(void);
619struct ethtool_ops *xgbe_get_ethtool_ops(void);
620
621int xgbe_mdio_register(struct xgbe_prv_data *);
622void xgbe_mdio_unregister(struct xgbe_prv_data *);
623void xgbe_dump_phy_registers(struct xgbe_prv_data *);
624void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
625 unsigned int);
626void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
627 unsigned int);
628void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
629void xgbe_get_all_hw_features(struct xgbe_prv_data *);
630int xgbe_powerup(struct net_device *, unsigned int);
631int xgbe_powerdown(struct net_device *, unsigned int);
632void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
633void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
634
635#ifdef CONFIG_DEBUG_FS
636void xgbe_debugfs_init(struct xgbe_prv_data *);
637void xgbe_debugfs_exit(struct xgbe_prv_data *);
638#else
639static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
640static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
641#endif /* CONFIG_DEBUG_FS */
642
643/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
644#if 0
645#define XGMAC_ENABLE_TX_DESC_DUMP
646#define XGMAC_ENABLE_RX_DESC_DUMP
647#endif
648
649/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
650#if 0
651#define XGMAC_ENABLE_TX_PKT_DUMP
652#define XGMAC_ENABLE_RX_PKT_DUMP
653#endif
654
655/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
656#if 0
657#define YDEBUG
658#define YDEBUG_MDIO
659#endif
660
661/* For debug prints */
662#ifdef YDEBUG
663#define DBGPR(x...) pr_alert(x)
664#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
665#else
666#define DBGPR(x...) do { } while (0)
667#define DBGPHY_REGS(x...) do { } while (0)
668#endif
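
/* Illustrative note (hypothetical call, not from this file): with YDEBUG
 * defined, DBGPR() prints via pr_alert(); otherwise it compiles away:
 *
 *	DBGPR("-->xgbe_some_function\n");
 */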
669
670#ifdef YDEBUG_MDIO
671#define DBGPR_MDIO(x...) pr_alert(x)
672#else
673#define DBGPR_MDIO(x...) do { } while (0)
674#endif
675
676#endif
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d647a7d115ac..18e2faccebb0 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -13,6 +13,7 @@
  * Vineet Gupta
  */
 
+#include <linux/crc32.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void arc_emac_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	arc_emac_intr(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
 /**
  * arc_emac_open - Open the network device.
  * @ndev: Pointer to the network device.
@@ -451,6 +461,41 @@ static int arc_emac_open(struct net_device *ndev)
 }
 
 /**
+ * arc_emac_set_rx_mode - Change the receive filtering mode.
+ * @ndev: Pointer to the network device.
+ *
+ * This function enables/disables promiscuous or all-multicast mode
+ * and updates the multicast filtering list of the network device.
+ */
+static void arc_emac_set_rx_mode(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+
+	if (ndev->flags & IFF_PROMISC) {
+		arc_reg_or(priv, R_CTRL, PROM_MASK);
+	} else {
+		arc_reg_clr(priv, R_CTRL, PROM_MASK);
+
+		if (ndev->flags & IFF_ALLMULTI) {
+			arc_reg_set(priv, R_LAFL, ~0);
+			arc_reg_set(priv, R_LAFH, ~0);
+		} else {
+			struct netdev_hw_addr *ha;
+			unsigned int filter[2] = { 0, 0 };
+			int bit;
+
+			netdev_for_each_mc_addr(ha, ndev) {
+				bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+				filter[bit >> 5] |= 1 << (bit & 31);
+			}
+
+			arc_reg_set(priv, R_LAFL, filter[0]);
+			arc_reg_set(priv, R_LAFH, filter[1]);
+		}
+	}
+}
+
+/**
  * arc_emac_stop - Close the network device.
  * @ndev: Pointer to the network device.
  *
@@ -620,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
 	.ndo_start_xmit		= arc_emac_tx,
 	.ndo_set_mac_address	= arc_emac_set_address,
 	.ndo_get_stats		= arc_emac_stats,
+	.ndo_set_rx_mode	= arc_emac_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= arc_emac_poll_controller,
+#endif
 };
 
 static int arc_emac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 17bb9ce96260..49faa97a30c3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	netdev->netdev_ops = &alx_netdev_ops;
-	SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+	netdev->ethtool_ops = &alx_ethtool_ops;
 	netdev->irq = pdev->irq;
 	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
 
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 859ea844ba0f..48694c239d5c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -56,8 +56,8 @@ static int atl1c_get_settings(struct net_device *netdev,
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = AUTONEG_ENABLE;
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
 
 void atl1c_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
+	netdev->ethtool_ops = &atl1c_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 82b23861bf55..1be072f4afc2 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -57,8 +57,8 @@ static int atl1e_get_settings(struct net_device *netdev,
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = AUTONEG_ENABLE;
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
 
 void atl1e_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+	netdev->ethtool_ops = &atl1e_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index dfd0e91fa726..b460db7919a2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3258,8 +3258,8 @@ static int atl1_get_settings(struct net_device *netdev,
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
 	    hw->media_type == MEDIA_TYPE_1000M_FULL)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 78befb522a52..6746bd717146 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	atl2_setup_pcicmd(pdev);
 
 	netdev->netdev_ops = &atl2_netdev_ops;
-	SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+	netdev->ethtool_ops = &atl2_ethtool_ops;
 	netdev->watchdog_timeo = 5 * HZ;
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
@@ -1769,8 +1769,8 @@ static int atl2_get_settings(struct net_device *netdev,
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd03722..3e488094b073 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
 	  In case of using this driver on BCM4706 it also requires enabling
 	  BCMA_DRIVER_GMAC_CMN to make it work.
 
+config SYSTEMPORT
+	tristate "Broadcom SYSTEMPORT internal MAC support"
+	depends on OF
+	select MII
+	select PHYLIB
+	select FIXED_PHY if SYSTEMPORT=y
+	help
+	  This driver supports the built-in Ethernet MACs found in the
+	  Broadcom BCM7xxx Set Top Box family chipset using an internal
+	  Ethernet switch.
+
 endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0d4c7d..e2a958a657e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
 obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 05ba62589017..ca5a20a48b14 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
 	netif_napi_add(dev, &bp->napi, b44_poll, 64);
 	dev->watchdog_timeo = B44_TX_TIMEOUT;
 	dev->irq = sdev->irq;
-	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+	dev->ethtool_ops = &b44_ethtool_ops;
 
 	err = ssb_bus_powerup(sdev->bus, 0);
 	if (err) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5565d6..3e8d1a88ed3d 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
 
 };
 
-#define BCM_ENET_STATS_LEN	\
-	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)
 
 static const u32 unused_mib_regs[] = {
 	ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	dev->netdev_ops = &bcm_enet_ops;
 	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
 
-	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+	dev->ethtool_ops = &bcm_enet_ethtool_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 	/* register netdevice */
 	dev->netdev_ops = &bcm_enetsw_ops;
 	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
-	SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	spin_lock_init(&priv->enetsw_mdio_lock);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 000000000000..141160ef249a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1654 @@
1/*
2 * Broadcom BCM7xxx System Port Ethernet MAC driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/platform_device.h>
20#include <linux/of.h>
21#include <linux/of_net.h>
22#include <linux/of_mdio.h>
23#include <linux/phy.h>
24#include <linux/phy_fixed.h>
25#include <net/ip.h>
26#include <net/ipv6.h>
27
28#include "bcmsysport.h"
29
30/* I/O accessors register helpers */
31#define BCM_SYSPORT_IO_MACRO(name, offset) \
32static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
33{ \
34 u32 reg = __raw_readl(priv->base + offset + off); \
35 return reg; \
36} \
37static inline void name##_writel(struct bcm_sysport_priv *priv, \
38 u32 val, u32 off) \
39{ \
40 __raw_writel(val, priv->base + offset + off); \
41} \
42
43BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
44BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
45BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
46BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
47BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
48BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
49BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
50BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
51BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
52BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
53
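/* Illustrative note (expansion sketch, not part of the driver): for the
 * umac instance, the macro above expands to roughly:
 *
 *	static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *	{
 *		return __raw_readl(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *	}
 *
 * plus the matching umac_writel(); the other instances differ only in
 * their block offset.
 */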
54/* L2-interrupt masking/unmasking helpers; each helper automatically saves
55 * the applied mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
56 */
57#define BCM_SYSPORT_INTR_L2(which) \
58static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
59 u32 mask) \
60{ \
61 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
62 priv->irq##which##_mask &= ~(mask); \
63} \
64static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
65 u32 mask) \
66{ \
67 intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
68 priv->irq##which##_mask |= (mask); \
69} \
70
71BCM_SYSPORT_INTR_L2(0)
72BCM_SYSPORT_INTR_L2(1)
73
74/* Register accesses to GISB/RBUS registers are expensive (a few hundred
75 * nanoseconds), so keep the 64-bit check explicit here to save
76 * one register write per packet on 32-bit platforms.
77 */
78static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
79 void __iomem *d,
80 dma_addr_t addr)
81{
82#ifdef CONFIG_PHYS_ADDR_T_64BIT
83 __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
84 d + DESC_ADDR_HI_STATUS_LEN);
85#endif
86 __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
87}
88
89static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
90 struct dma_desc *desc,
91 unsigned int port)
92{
93 /* Ports are latched, so write upper address first */
94 tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
95 tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
96}
97
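/* Illustrative note (not part of the driver): per the "ports are latched"
 * comment above, the LO write completes the latched pair, so the upper
 * word must already be in place; swapping the two tdma_writel() calls
 * would submit a descriptor with a stale upper address.
 */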
98/* Ethtool operations */
99static int bcm_sysport_set_settings(struct net_device *dev,
100 struct ethtool_cmd *cmd)
101{
102 struct bcm_sysport_priv *priv = netdev_priv(dev);
103
104 if (!netif_running(dev))
105 return -EINVAL;
106
107 return phy_ethtool_sset(priv->phydev, cmd);
108}
109
110static int bcm_sysport_get_settings(struct net_device *dev,
111 struct ethtool_cmd *cmd)
112{
113 struct bcm_sysport_priv *priv = netdev_priv(dev);
114
115 if (!netif_running(dev))
116 return -EINVAL;
117
118 return phy_ethtool_gset(priv->phydev, cmd);
119}
120
121static int bcm_sysport_set_rx_csum(struct net_device *dev,
122 netdev_features_t wanted)
123{
124 struct bcm_sysport_priv *priv = netdev_priv(dev);
125 u32 reg;
126
127 priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
128 reg = rxchk_readl(priv, RXCHK_CONTROL);
129 if (priv->rx_csum_en)
130 reg |= RXCHK_EN;
131 else
132 reg &= ~RXCHK_EN;
133
134	/* If UniMAC forwards CRC, we need to skip over it so that a
135	 * valid CHK bit gets set in the per-packet status word
136	 */
137 if (priv->rx_csum_en && priv->crc_fwd)
138 reg |= RXCHK_SKIP_FCS;
139 else
140 reg &= ~RXCHK_SKIP_FCS;
141
142 rxchk_writel(priv, reg, RXCHK_CONTROL);
143
144 return 0;
145}
146
147static int bcm_sysport_set_tx_csum(struct net_device *dev,
148 netdev_features_t wanted)
149{
150 struct bcm_sysport_priv *priv = netdev_priv(dev);
151 u32 reg;
152
153 /* Hardware transmit checksum requires us to enable the Transmit status
154 * block prepended to the packet contents
155 */
156 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
157 reg = tdma_readl(priv, TDMA_CONTROL);
158 if (priv->tsb_en)
159 reg |= TSB_EN;
160 else
161 reg &= ~TSB_EN;
162 tdma_writel(priv, reg, TDMA_CONTROL);
163
164 return 0;
165}
166
167static int bcm_sysport_set_features(struct net_device *dev,
168 netdev_features_t features)
169{
170 netdev_features_t changed = features ^ dev->features;
171 netdev_features_t wanted = dev->wanted_features;
172 int ret = 0;
173
174 if (changed & NETIF_F_RXCSUM)
175 ret = bcm_sysport_set_rx_csum(dev, wanted);
176 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
177 ret = bcm_sysport_set_tx_csum(dev, wanted);
178
179 return ret;
180}
181
182/* Hardware counter fields must be declared in hardware order, since
183 * their register offsets are derived from the declaration order
184 * (order in structure declaration = order in hardware) */
185static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
186 /* general stats */
187 STAT_NETDEV(rx_packets),
188 STAT_NETDEV(tx_packets),
189 STAT_NETDEV(rx_bytes),
190 STAT_NETDEV(tx_bytes),
191 STAT_NETDEV(rx_errors),
192 STAT_NETDEV(tx_errors),
193 STAT_NETDEV(rx_dropped),
194 STAT_NETDEV(tx_dropped),
195 STAT_NETDEV(multicast),
196 /* UniMAC RSV counters */
197 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
198 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
199 STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
200 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
201 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
202 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
203 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
204 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
205 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
206 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
207 STAT_MIB_RX("rx_pkts", mib.rx.pkt),
208 STAT_MIB_RX("rx_bytes", mib.rx.bytes),
209 STAT_MIB_RX("rx_multicast", mib.rx.mca),
210 STAT_MIB_RX("rx_broadcast", mib.rx.bca),
211 STAT_MIB_RX("rx_fcs", mib.rx.fcs),
212 STAT_MIB_RX("rx_control", mib.rx.cf),
213 STAT_MIB_RX("rx_pause", mib.rx.pf),
214 STAT_MIB_RX("rx_unknown", mib.rx.uo),
215 STAT_MIB_RX("rx_align", mib.rx.aln),
216 STAT_MIB_RX("rx_outrange", mib.rx.flr),
217 STAT_MIB_RX("rx_code", mib.rx.cde),
218 STAT_MIB_RX("rx_carrier", mib.rx.fcr),
219 STAT_MIB_RX("rx_oversize", mib.rx.ovr),
220 STAT_MIB_RX("rx_jabber", mib.rx.jbr),
221 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
222 STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
223 STAT_MIB_RX("rx_unicast", mib.rx.uc),
224 STAT_MIB_RX("rx_ppp", mib.rx.ppp),
225 STAT_MIB_RX("rx_crc", mib.rx.rcrc),
226 /* UniMAC TSV counters */
227 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
228 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
229 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
230 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
231 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
232 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
233 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
234 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
235 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
236 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
237 STAT_MIB_TX("tx_pkts", mib.tx.pkts),
238 STAT_MIB_TX("tx_multicast", mib.tx.mca),
239 STAT_MIB_TX("tx_broadcast", mib.tx.bca),
240 STAT_MIB_TX("tx_pause", mib.tx.pf),
241 STAT_MIB_TX("tx_control", mib.tx.cf),
242 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
243 STAT_MIB_TX("tx_oversize", mib.tx.ovr),
244 STAT_MIB_TX("tx_defer", mib.tx.drf),
245 STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
246 STAT_MIB_TX("tx_single_col", mib.tx.scl),
247 STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
248 STAT_MIB_TX("tx_late_col", mib.tx.lcl),
249 STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
250 STAT_MIB_TX("tx_frags", mib.tx.frg),
251 STAT_MIB_TX("tx_total_col", mib.tx.ncl),
252 STAT_MIB_TX("tx_jabber", mib.tx.jbr),
253 STAT_MIB_TX("tx_bytes", mib.tx.bytes),
254 STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
255 STAT_MIB_TX("tx_unicast", mib.tx.uc),
256 /* UniMAC RUNT counters */
257 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
258 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
259 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
260 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
261 /* RXCHK misc statistics */
262 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
263 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
264 RXCHK_OTHER_DISC_CNTR),
265 /* RBUF misc statistics */
266 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
267 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
268};
269
270#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
271
272static void bcm_sysport_get_drvinfo(struct net_device *dev,
273 struct ethtool_drvinfo *info)
274{
275 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
276 strlcpy(info->version, "0.1", sizeof(info->version));
277 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
278 info->n_stats = BCM_SYSPORT_STATS_LEN;
279}
280
281static u32 bcm_sysport_get_msglvl(struct net_device *dev)
282{
283 struct bcm_sysport_priv *priv = netdev_priv(dev);
284
285 return priv->msg_enable;
286}
287
288static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
289{
290 struct bcm_sysport_priv *priv = netdev_priv(dev);
291
292 priv->msg_enable = enable;
293}
294
295static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
296{
297 switch (string_set) {
298 case ETH_SS_STATS:
299 return BCM_SYSPORT_STATS_LEN;
300 default:
301 return -EOPNOTSUPP;
302 }
303}
304
305static void bcm_sysport_get_strings(struct net_device *dev,
306 u32 stringset, u8 *data)
307{
308 int i;
309
310 switch (stringset) {
311 case ETH_SS_STATS:
312 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
313 memcpy(data + i * ETH_GSTRING_LEN,
314 bcm_sysport_gstrings_stats[i].stat_string,
315 ETH_GSTRING_LEN);
316 }
317 break;
318 default:
319 break;
320 }
321}
322
323static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
324{
325 int i, j = 0;
326
327 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
328 const struct bcm_sysport_stats *s;
329 u8 offset = 0;
330 u32 val = 0;
331 char *p;
332
333 s = &bcm_sysport_gstrings_stats[i];
334 switch (s->type) {
335 case BCM_SYSPORT_STAT_NETDEV:
336 continue;
337 case BCM_SYSPORT_STAT_MIB_RX:
338 case BCM_SYSPORT_STAT_MIB_TX:
339 case BCM_SYSPORT_STAT_RUNT:
340 if (s->type != BCM_SYSPORT_STAT_MIB_RX)
341 offset = UMAC_MIB_STAT_OFFSET;
342 val = umac_readl(priv, UMAC_MIB_START + j + offset);
343 break;
344 case BCM_SYSPORT_STAT_RXCHK:
345 val = rxchk_readl(priv, s->reg_offset);
346 if (val == ~0)
347 rxchk_writel(priv, 0, s->reg_offset);
348 break;
349 case BCM_SYSPORT_STAT_RBUF:
350 val = rbuf_readl(priv, s->reg_offset);
351 if (val == ~0)
352 rbuf_writel(priv, 0, s->reg_offset);
353 break;
354 }
355
356 j += s->stat_sizeof;
357 p = (char *)priv + s->stat_offset;
358 *(u32 *)p = val;
359 }
360
361 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
362}
363
364static void bcm_sysport_get_stats(struct net_device *dev,
365 struct ethtool_stats *stats, u64 *data)
366{
367 struct bcm_sysport_priv *priv = netdev_priv(dev);
368 int i;
369
370 if (netif_running(dev))
371 bcm_sysport_update_mib_counters(priv);
372
373 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
374 const struct bcm_sysport_stats *s;
375 char *p;
376
377 s = &bcm_sysport_gstrings_stats[i];
378 if (s->type == BCM_SYSPORT_STAT_NETDEV)
379 p = (char *)&dev->stats;
380 else
381 p = (char *)priv;
382 p += s->stat_offset;
383 data[i] = *(u32 *)p;
384 }
385}
386
387static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
388{
389 dev_kfree_skb_any(cb->skb);
390 cb->skb = NULL;
391 dma_unmap_addr_set(cb, dma_addr, 0);
392}
393
394static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
395 struct bcm_sysport_cb *cb)
396{
397 struct device *kdev = &priv->pdev->dev;
398 struct net_device *ndev = priv->netdev;
399 dma_addr_t mapping;
400 int ret;
401
402 cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
403 if (!cb->skb) {
404 netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
405 return -ENOMEM;
406 }
407
408 mapping = dma_map_single(kdev, cb->skb->data,
409 RX_BUF_LENGTH, DMA_FROM_DEVICE);
410 ret = dma_mapping_error(kdev, mapping);
411 if (ret) {
412 bcm_sysport_free_cb(cb);
413 netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
414 return ret;
415 }
416
417 dma_unmap_addr_set(cb, dma_addr, mapping);
418 dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
419
420 priv->rx_bd_assign_index++;
421 priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
422 priv->rx_bd_assign_ptr = priv->rx_bds +
423 (priv->rx_bd_assign_index * DESC_SIZE);
424
425 netif_dbg(priv, rx_status, ndev, "RX refill\n");
426
427 return 0;
428}
429
430static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
431{
432 struct bcm_sysport_cb *cb;
433 int ret = 0;
434 unsigned int i;
435
436 for (i = 0; i < priv->num_rx_bds; i++) {
437 cb = &priv->rx_cbs[priv->rx_bd_assign_index];
438 if (cb->skb)
439 continue;
440
441 ret = bcm_sysport_rx_refill(priv, cb);
442 if (ret)
443 break;
444 }
445
446 return ret;
447}
448
449/* Poll the hardware for up to budget packets to process */
450static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
451 unsigned int budget)
452{
453 struct device *kdev = &priv->pdev->dev;
454 struct net_device *ndev = priv->netdev;
455 unsigned int processed = 0, to_process;
456 struct bcm_sysport_cb *cb;
457 struct sk_buff *skb;
458 unsigned int p_index;
459 u16 len, status;
460 struct bcm_rsb *rsb;
461
462 /* Determine how much we should process since last call */
463 p_index = rdma_readl(priv, RDMA_PROD_INDEX);
464 p_index &= RDMA_PROD_INDEX_MASK;
465
466 if (p_index < priv->rx_c_index)
467 to_process = (RDMA_CONS_INDEX_MASK + 1) -
468 priv->rx_c_index + p_index;
469 else
470 to_process = p_index - priv->rx_c_index;
471
472 netif_dbg(priv, rx_status, ndev,
473 "p_index=%d rx_c_index=%d to_process=%d\n",
474 p_index, priv->rx_c_index, to_process);
475
476 while ((processed < to_process) &&
477 (processed < budget)) {
478
479 cb = &priv->rx_cbs[priv->rx_read_ptr];
480 skb = cb->skb;
481 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
482 RX_BUF_LENGTH, DMA_FROM_DEVICE);
483
484 /* Extract the Receive Status Block prepended */
485 rsb = (struct bcm_rsb *)skb->data;
486 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
487 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
488 DESC_STATUS_MASK;
489
490 processed++;
491 priv->rx_read_ptr++;
492 if (priv->rx_read_ptr == priv->num_rx_bds)
493 priv->rx_read_ptr = 0;
494
495 netif_dbg(priv, rx_status, ndev,
496 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
497 p_index, priv->rx_c_index, priv->rx_read_ptr,
498 len, status);
499
500 if (unlikely(!skb)) {
501 netif_err(priv, rx_err, ndev, "out of memory!\n");
502 ndev->stats.rx_dropped++;
503 ndev->stats.rx_errors++;
504 goto refill;
505 }
506
507 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
508 netif_err(priv, rx_status, ndev, "fragmented packet!\n");
509 ndev->stats.rx_dropped++;
510 ndev->stats.rx_errors++;
511 bcm_sysport_free_cb(cb);
512 goto refill;
513 }
514
515 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
516 netif_err(priv, rx_err, ndev, "error packet\n");
517 if (status & RX_STATUS_OVFLOW)
518 ndev->stats.rx_over_errors++;
519 ndev->stats.rx_dropped++;
520 ndev->stats.rx_errors++;
521 bcm_sysport_free_cb(cb);
522 goto refill;
523 }
524
525 skb_put(skb, len);
526
527 /* Hardware validated our checksum */
528 if (likely(status & DESC_L4_CSUM))
529 skb->ip_summed = CHECKSUM_UNNECESSARY;
530
531		/* The hardware prepends 2 bytes before the Ethernet header,
532		 * in addition to the Receive Status Block; strip all
533		 * of this from the SKB.
534		 */
535 skb_pull(skb, sizeof(*rsb) + 2);
536 len -= (sizeof(*rsb) + 2);
537
538 /* UniMAC may forward CRC */
539 if (priv->crc_fwd) {
540 skb_trim(skb, len - ETH_FCS_LEN);
541 len -= ETH_FCS_LEN;
542 }
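		/* Illustrative note (not part of the driver): at this point
		 * the received buffer looked like
		 *
		 *   [ RSB ][ 2-byte pad ][ Ethernet frame ][ FCS, if fwd ]
		 *
		 * and the pull/trim above leaves only the Ethernet frame.
		 */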
543
544 skb->protocol = eth_type_trans(skb, ndev);
545 ndev->stats.rx_packets++;
546 ndev->stats.rx_bytes += len;
547
548 napi_gro_receive(&priv->napi, skb);
549refill:
550 bcm_sysport_rx_refill(priv, cb);
551 }
552
553 return processed;
554}
555
556static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
557 struct bcm_sysport_cb *cb,
558 unsigned int *bytes_compl,
559 unsigned int *pkts_compl)
560{
561 struct device *kdev = &priv->pdev->dev;
562 struct net_device *ndev = priv->netdev;
563
564 if (cb->skb) {
565 ndev->stats.tx_bytes += cb->skb->len;
566 *bytes_compl += cb->skb->len;
567 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
568 dma_unmap_len(cb, dma_len),
569 DMA_TO_DEVICE);
570 ndev->stats.tx_packets++;
571 (*pkts_compl)++;
572 bcm_sysport_free_cb(cb);
573 /* SKB fragment */
574 } else if (dma_unmap_addr(cb, dma_addr)) {
575 ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
576 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
577 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
578 dma_unmap_addr_set(cb, dma_addr, 0);
579 }
580}
581
582/* Reclaim queued SKBs for transmission completion, lockless version */
583static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
584 struct bcm_sysport_tx_ring *ring)
585{
586 struct net_device *ndev = priv->netdev;
587 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
588 unsigned int pkts_compl = 0, bytes_compl = 0;
589 struct bcm_sysport_cb *cb;
590 struct netdev_queue *txq;
591 u32 hw_ind;
592
593 txq = netdev_get_tx_queue(ndev, ring->index);
594
595 /* Compute how many descriptors have been processed since last call */
596 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
597 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
598 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
599
600 last_c_index = ring->c_index;
601 num_tx_cbs = ring->size;
602
603 c_index &= (num_tx_cbs - 1);
604
605 if (c_index >= last_c_index)
606 last_tx_cn = c_index - last_c_index;
607 else
608 last_tx_cn = num_tx_cbs - last_c_index + c_index;
609
610 netif_dbg(priv, tx_done, ndev,
611 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
612 ring->index, c_index, last_tx_cn, last_c_index);
613
614 while (last_tx_cn-- > 0) {
615 cb = ring->cbs + last_c_index;
616 bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
617
618 ring->desc_count++;
619 last_c_index++;
620 last_c_index &= (num_tx_cbs - 1);
621 }
622
623 ring->c_index = c_index;
624
625 if (netif_tx_queue_stopped(txq) && pkts_compl)
626 netif_tx_wake_queue(txq);
627
628 netif_dbg(priv, tx_done, ndev,
629 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
630 ring->index, ring->c_index, pkts_compl, bytes_compl);
631
632 return pkts_compl;
633}
634
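/* Illustrative note (hypothetical numbers, not from this driver): with
 * ring->size = 256, last_c_index = 250 and a masked hardware consumer
 * index of 4, the reclaim loop above runs last_tx_cn = 256 - 250 + 4 = 10
 * times before c_index is stored back.
 */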
635/* Locked version of the per-ring TX reclaim routine */
636static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
637 struct bcm_sysport_tx_ring *ring)
638{
639 unsigned int released;
640 unsigned long flags;
641
642 spin_lock_irqsave(&ring->lock, flags);
643 released = __bcm_sysport_tx_reclaim(priv, ring);
644 spin_unlock_irqrestore(&ring->lock, flags);
645
646 return released;
647}
648
649static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
650{
651 struct bcm_sysport_tx_ring *ring =
652 container_of(napi, struct bcm_sysport_tx_ring, napi);
653 unsigned int work_done = 0;
654
655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
656
657 if (work_done < budget) {
658 napi_complete(napi);
659 /* re-enable TX interrupt */
660 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
661 }
662
663 return work_done;
664}
665
666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
667{
668 unsigned int q;
669
670 for (q = 0; q < priv->netdev->num_tx_queues; q++)
671 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
672}
673
674static int bcm_sysport_poll(struct napi_struct *napi, int budget)
675{
676 struct bcm_sysport_priv *priv =
677 container_of(napi, struct bcm_sysport_priv, napi);
678 unsigned int work_done = 0;
679
680 work_done = bcm_sysport_desc_rx(priv, budget);
681
682 priv->rx_c_index += work_done;
683 priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
684 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
685
686 if (work_done < budget) {
687 napi_complete(napi);
688 /* re-enable RX interrupts */
689 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
690 }
691
692 return work_done;
693}
694
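/* Illustrative note (not part of the driver): rx_c_index above is
 * advanced by the packets just processed and wrapped with
 * RDMA_CONS_INDEX_MASK before the write-back, which keeps the
 * p_index < rx_c_index comparison in bcm_sysport_desc_rx() consistent
 * across counter wrap.
 */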
695
696/* RX and misc interrupt routine */
697static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
698{
699 struct net_device *dev = dev_id;
700 struct bcm_sysport_priv *priv = netdev_priv(dev);
701
702 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
703 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
704 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
705
706 if (unlikely(priv->irq0_stat == 0)) {
707 netdev_warn(priv->netdev, "spurious RX interrupt\n");
708 return IRQ_NONE;
709 }
710
711 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
712 if (likely(napi_schedule_prep(&priv->napi))) {
713 /* disable RX interrupts */
714 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
715 __napi_schedule(&priv->napi);
716 }
717 }
718
719 /* TX ring is full, perform a full reclaim since we do not know
720 * which one would trigger this interrupt
721 */
722 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
723 bcm_sysport_tx_reclaim_all(priv);
724
725 return IRQ_HANDLED;
726}
727
728/* TX interrupt service routine */
729static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
730{
731 struct net_device *dev = dev_id;
732 struct bcm_sysport_priv *priv = netdev_priv(dev);
733 struct bcm_sysport_tx_ring *txr;
734 unsigned int ring;
735
736 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
737 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
738 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
739
740 if (unlikely(priv->irq1_stat == 0)) {
741 netdev_warn(priv->netdev, "spurious TX interrupt\n");
742 return IRQ_NONE;
743 }
744
745 for (ring = 0; ring < dev->num_tx_queues; ring++) {
746 if (!(priv->irq1_stat & BIT(ring)))
747 continue;
748
749 txr = &priv->tx_rings[ring];
750
751 if (likely(napi_schedule_prep(&txr->napi))) {
752 intrl2_1_mask_set(priv, BIT(ring));
753 __napi_schedule(&txr->napi);
754 }
755 }
756
757 return IRQ_HANDLED;
758}
759
760static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
761{
762 struct sk_buff *nskb;
763 struct bcm_tsb *tsb;
764 u32 csum_info;
765 u8 ip_proto;
766 u16 csum_start;
767 u16 ip_ver;
768
769 /* Re-allocate SKB if needed */
770 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
771 nskb = skb_realloc_headroom(skb, sizeof(*tsb));
772 dev_kfree_skb(skb);
773 if (!nskb) {
774 dev->stats.tx_errors++;
775 dev->stats.tx_dropped++;
776 return -ENOMEM;
777 }
778 skb = nskb;
779 }
780
781 tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
782 /* Zero-out TSB by default */
783 memset(tsb, 0, sizeof(*tsb));
784
785 if (skb->ip_summed == CHECKSUM_PARTIAL) {
786 ip_ver = htons(skb->protocol);
787 switch (ip_ver) {
788 case ETH_P_IP:
789 ip_proto = ip_hdr(skb)->protocol;
790 break;
791 case ETH_P_IPV6:
792 ip_proto = ipv6_hdr(skb)->nexthdr;
793 break;
794 default:
795 return 0;
796 }
797
798 /* Get the checksum offset and the L4 (transport) offset */
799 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
800 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
801 csum_info |= (csum_start << L4_PTR_SHIFT);
802
803 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
804 csum_info |= L4_LENGTH_VALID;
805 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
806 csum_info |= L4_UDP;
807 } else
808 csum_info = 0;
809
810 tsb->l4_ptr_dest_map = csum_info;
811 }
812
813 return 0;
814}
815
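/* Illustrative note (hypothetical offsets, not from this driver): for TCP
 * over IPv4 with the transport header 34 bytes into the frame, csum_start
 * above works out to 34 (relative to the original frame, after the TSB
 * adjustment) and the TCP checksum sits 16 bytes into that header, so
 * csum_info packs both 34 + 16 = 50 (checksum pointer) and 34 (L4
 * pointer), plus L4_LENGTH_VALID.
 */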
816static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
817 struct net_device *dev)
818{
819 struct bcm_sysport_priv *priv = netdev_priv(dev);
820 struct device *kdev = &priv->pdev->dev;
821 struct bcm_sysport_tx_ring *ring;
822 struct bcm_sysport_cb *cb;
823 struct netdev_queue *txq;
824 struct dma_desc *desc;
825 unsigned int skb_len;
826 unsigned long flags;
827 dma_addr_t mapping;
828 u32 len_status;
829 u16 queue;
830 int ret;
831
832 queue = skb_get_queue_mapping(skb);
833 txq = netdev_get_tx_queue(dev, queue);
834 ring = &priv->tx_rings[queue];
835
836 /* lock against tx reclaim in BH context and TX ring full interrupt */
837 spin_lock_irqsave(&ring->lock, flags);
838 if (unlikely(ring->desc_count == 0)) {
839 netif_tx_stop_queue(txq);
840 netdev_err(dev, "queue %d awake and ring full!\n", queue);
841 ret = NETDEV_TX_BUSY;
842 goto out;
843 }
844
845 /* Insert TSB and checksum infos */
846 if (priv->tsb_en) {
847 ret = bcm_sysport_insert_tsb(skb, dev);
848 if (ret) {
849 ret = NETDEV_TX_OK;
850 goto out;
851 }
852 }
853
854 /* The Ethernet switch we are interfaced with needs packets to be at
855	 * least 64 bytes (including FCS), otherwise they will be discarded when
856 * they enter the switch port logic. When Broadcom tags are enabled, we
857 * need to make sure that packets are at least 68 bytes
858 * (including FCS and tag) because the length verification is done after
859 * the Broadcom tag is stripped off the ingress packet.
860 */
861 if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
862 ret = NETDEV_TX_OK;
863 goto out;
864 }
865
866 skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
867 ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
868
869 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
870 if (dma_mapping_error(kdev, mapping)) {
871 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
872 skb->data, skb_len);
873 ret = NETDEV_TX_OK;
874 goto out;
875 }
876
877 /* Remember the SKB for future freeing */
878 cb = &ring->cbs[ring->curr_desc];
879 cb->skb = skb;
880 dma_unmap_addr_set(cb, dma_addr, mapping);
881 dma_unmap_len_set(cb, dma_len, skb_len);
882
883 /* Reuse the ring's single DMA-coherent descriptor (see bcm_sysport_init_tx_ring) */
884 desc = ring->desc_cpu;
885
886 desc->addr_lo = lower_32_bits(mapping);
887 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
888 len_status |= (skb_len << DESC_LEN_SHIFT);
889 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
890 DESC_STATUS_SHIFT;
891 if (skb->ip_summed == CHECKSUM_PARTIAL)
892 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
893
894 ring->curr_desc++;
895 if (ring->curr_desc == ring->size)
896 ring->curr_desc = 0;
897 ring->desc_count--;
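	/* desc_count tracks free descriptors: it starts at the full ring size
	 * and the queue is stopped below once it drops to zero
	 */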
898
899 /* Ensure write completion of the descriptor status/length
900 * in DRAM before the System Port WRITE_PORT register latches
901 * the value
902 */
903 wmb();
904 desc->addr_status_len = len_status;
905 wmb();
906
907 /* Write this descriptor address to the RING write port */
908 tdma_port_write_desc_addr(priv, desc, ring->index);
909
910 /* Check ring space and update SW control flow */
911 if (ring->desc_count == 0)
912 netif_tx_stop_queue(txq);
913
914 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
915 ring->index, ring->desc_count, ring->curr_desc);
916
917 ret = NETDEV_TX_OK;
918out:
919 spin_unlock_irqrestore(&ring->lock, flags);
920 return ret;
921}
922
923static void bcm_sysport_tx_timeout(struct net_device *dev)
924{
925 netdev_warn(dev, "transmit timeout!\n");
926
927 dev->trans_start = jiffies;
928 dev->stats.tx_errors++;
929
930 netif_tx_wake_all_queues(dev);
931}
932
933/* phylib adjust link callback */
934static void bcm_sysport_adj_link(struct net_device *dev)
935{
936 struct bcm_sysport_priv *priv = netdev_priv(dev);
937 struct phy_device *phydev = priv->phydev;
938 unsigned int changed = 0;
939 u32 cmd_bits = 0, reg;
940
941 if (priv->old_link != phydev->link) {
942 changed = 1;
943 priv->old_link = phydev->link;
944 }
945
946 if (priv->old_duplex != phydev->duplex) {
947 changed = 1;
948 priv->old_duplex = phydev->duplex;
949 }
950
951 switch (phydev->speed) {
952 case SPEED_2500:
953 cmd_bits = CMD_SPEED_2500;
954 break;
955 case SPEED_1000:
956 cmd_bits = CMD_SPEED_1000;
957 break;
958 case SPEED_100:
959 cmd_bits = CMD_SPEED_100;
960 break;
961 case SPEED_10:
962 cmd_bits = CMD_SPEED_10;
963 break;
964 default:
965 break;
966 }
967 cmd_bits <<= CMD_SPEED_SHIFT;
968
969 if (phydev->duplex == DUPLEX_HALF)
970 cmd_bits |= CMD_HD_EN;
971
972 if (priv->old_pause != phydev->pause) {
973 changed = 1;
974 priv->old_pause = phydev->pause;
975 }
976
977 if (!phydev->pause)
978 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
979
980 if (changed) {
981 reg = umac_readl(priv, UMAC_CMD);
982 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
983 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
984 CMD_TX_PAUSE_IGNORE);
985 reg |= cmd_bits;
986 umac_writel(priv, reg, UMAC_CMD);
987
988 phy_print_status(priv->phydev);
989 }
990}
991
992static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
993 unsigned int index)
994{
995 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
996 struct device *kdev = &priv->pdev->dev;
997 size_t size;
998 void *p;
999 u32 reg;
1000
1001 /* Simple descriptors partitioning for now */
1002 size = 256;
1003
1004 /* We just need one DMA descriptor which is DMA-able, since writing to
1005 * the port will allocate a new descriptor in its internal linked-list
1006 */
1007 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, GFP_KERNEL);
1008 if (!p) {
1009 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1010 return -ENOMEM;
1011 }
1012
1013 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1014 if (!ring->cbs) {
1015 dma_free_coherent(kdev, sizeof(struct dma_desc), p, ring->desc_dma);
1016 return -ENOMEM;
1017 }
1018
1019 /* Initialize SW view of the ring */
1020 spin_lock_init(&ring->lock);
1021 ring->priv = priv;
1022 netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
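	/* One NAPI context per TX ring, with the default poll weight of 64 */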
1023 ring->index = index;
1024 ring->size = size;
1025 ring->alloc_size = ring->size;
1026 ring->desc_cpu = p;
1027 ring->desc_count = ring->size;
1028 ring->curr_desc = 0;
1029
1030 /* Initialize HW ring */
1031 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1032 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1033 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1034 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1035 tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
1036 tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
1037
1038 /* Program the number of descriptors as MAX_THRESHOLD and a
1039 * hysteresis trigger of one descriptor
1040 */
1041 tdma_writel(priv, ring->size |
1042 1 << RING_HYST_THRESH_SHIFT,
1043 TDMA_DESC_RING_MAX_HYST(index));
1044
1045 /* Enable the ring queue in the arbiter */
1046 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1047 reg |= (1 << index);
1048 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1049
1050 napi_enable(&ring->napi);
1051
1052 netif_dbg(priv, hw, priv->netdev,
1053 "TDMA cfg, size=%d, desc_cpu=%p\n",
1054 ring->size, ring->desc_cpu);
1055
1056 return 0;
1057}
1058
1059static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1060 unsigned int index)
1061{
1062 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1063 struct device *kdev = &priv->pdev->dev;
1064 u32 reg;
1065
1066 /* Caller should stop the TDMA engine */
1067 reg = tdma_readl(priv, TDMA_STATUS);
1068 if (!(reg & TDMA_DISABLED))
1069 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1070
1071 napi_disable(&ring->napi);
1072 netif_napi_del(&ring->napi);
1073
1074 bcm_sysport_tx_reclaim(priv, ring);
1075
1076 kfree(ring->cbs);
1077 ring->cbs = NULL;
1078
1079 if (ring->desc_dma) {
1080 dma_free_coherent(kdev, sizeof(struct dma_desc), ring->desc_cpu, ring->desc_dma);
1081 ring->desc_dma = 0;
1082 }
1083 ring->size = 0;
1084 ring->alloc_size = 0;
1085
1086 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1087}
1088
1089/* RDMA helper */
1090static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1091 unsigned int enable)
1092{
1093 unsigned int timeout = 1000;
1094 u32 reg;
1095
1096 reg = rdma_readl(priv, RDMA_CONTROL);
1097 if (enable)
1098 reg |= RDMA_EN;
1099 else
1100 reg &= ~RDMA_EN;
1101 rdma_writel(priv, reg, RDMA_CONTROL);
1102
1103 /* Poll for RDMA enable/disable completion */
1104 do {
1105 reg = rdma_readl(priv, RDMA_STATUS);
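		/* RDMA_DISABLED is a status bit: wait until it matches the
		 * state requested above (set when disabling, clear when
		 * enabling)
		 */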
1106 if (!!(reg & RDMA_DISABLED) == !enable)
1107 return 0;
1108 usleep_range(1000, 2000);
1109 } while (timeout-- > 0);
1110
1111 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1112
1113 return -ETIMEDOUT;
1114}
1115
1116/* TDMA helper */
1117static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1118 unsigned int enable)
1119{
1120 unsigned int timeout = 1000;
1121 u32 reg;
1122
1123 reg = tdma_readl(priv, TDMA_CONTROL);
1124 if (enable)
1125 reg |= TDMA_EN;
1126 else
1127 reg &= ~TDMA_EN;
1128 tdma_writel(priv, reg, TDMA_CONTROL);
1129
1130 /* Poll for TDMA enable/disable completion */
1131 do {
1132 reg = tdma_readl(priv, TDMA_STATUS);
1133 if (!!(reg & TDMA_DISABLED) == !enable)
1134 return 0;
1135
1136 usleep_range(1000, 2000);
1137 } while (timeout-- > 0);
1138
1139 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1140
1141 return -ETIMEDOUT;
1142}
1143
1144static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1145{
1146 u32 reg;
1147 int ret;
1148
1149 /* Initialize SW view of the RX ring */
1150 priv->num_rx_bds = NUM_RX_DESC;
1151 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1152 priv->rx_bd_assign_ptr = priv->rx_bds;
1153 priv->rx_bd_assign_index = 0;
1154 priv->rx_c_index = 0;
1155 priv->rx_read_ptr = 0;
1156 priv->rx_cbs = kzalloc(priv->num_rx_bds *
1157 sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1158 if (!priv->rx_cbs) {
1159 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1160 return -ENOMEM;
1161 }
1162
1163 ret = bcm_sysport_alloc_rx_bufs(priv);
1164 if (ret) {
1165 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1166 return ret;
1167 }
1168
1169 /* Initialize HW, ensure RDMA is disabled */
1170 reg = rdma_readl(priv, RDMA_STATUS);
1171 if (!(reg & RDMA_DISABLED))
1172 rdma_enable_set(priv, 0);
1173
1174 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1175 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1176 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1177 rdma_writel(priv, 0, RDMA_CONS_INDEX);
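	/* The ring size goes into the upper half-word of RDMA_RING_BUF_SIZE,
	 * the per-buffer length into the lower one (RDMA_RING_SIZE_SHIFT is 16)
	 */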
1178 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1179 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1180 /* Operate the queue in ring mode */
1181 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1182 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1183 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1184 rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
1185
1186 rdma_writel(priv, 1, RDMA_MBDONE_INTR);
1187
1188 netif_dbg(priv, hw, priv->netdev,
1189 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1190 priv->num_rx_bds, priv->rx_bds);
1191
1192 return 0;
1193}
1194
1195static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1196{
1197 struct bcm_sysport_cb *cb;
1198 unsigned int i;
1199 u32 reg;
1200
1201 /* Caller should ensure RDMA is disabled */
1202 reg = rdma_readl(priv, RDMA_STATUS);
1203 if (!(reg & RDMA_DISABLED))
1204 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1205
1206 for (i = 0; i < priv->num_rx_bds; i++) {
1207 cb = &priv->rx_cbs[i];
1208 if (dma_unmap_addr(cb, dma_addr))
1209 dma_unmap_single(&priv->pdev->dev,
1210 dma_unmap_addr(cb, dma_addr),
1211 RX_BUF_LENGTH, DMA_FROM_DEVICE);
1212 bcm_sysport_free_cb(cb);
1213 }
1214
1215 kfree(priv->rx_cbs);
1216 priv->rx_cbs = NULL;
1217
1218 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1219}
1220
1221static void bcm_sysport_set_rx_mode(struct net_device *dev)
1222{
1223 struct bcm_sysport_priv *priv = netdev_priv(dev);
1224 u32 reg;
1225
1226 reg = umac_readl(priv, UMAC_CMD);
1227 if (dev->flags & IFF_PROMISC)
1228 reg |= CMD_PROMISC;
1229 else
1230 reg &= ~CMD_PROMISC;
1231 umac_writel(priv, reg, UMAC_CMD);
1232
1233 /* No support for ALLMULTI */
1234 if (dev->flags & IFF_ALLMULTI)
1235 return;
1236}
1237
1238static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1239 unsigned int enable)
1240{
1241 u32 reg;
1242
1243 reg = umac_readl(priv, UMAC_CMD);
1244 if (enable)
1245 reg |= CMD_RX_EN | CMD_TX_EN;
1246 else
1247 reg &= ~(CMD_RX_EN | CMD_TX_EN);
1248 umac_writel(priv, reg, UMAC_CMD);
1249
1250 /* UniMAC stops on a packet boundary, wait for a full-sized packet
1251 * to be processed (1 msec).
1252 */
1253 if (enable == 0)
1254 usleep_range(1000, 2000);
1255}
1256
1257static inline int umac_reset(struct bcm_sysport_priv *priv)
1258{
1259 u32 reg;
1260
1261 /* CMD_SW_RESET does not self-clear, so assert the reset, give the
1262 * UniMAC block some time to settle, and then de-assert it again
1263 */
1264 reg = umac_readl(priv, UMAC_CMD);
1265 reg |= CMD_SW_RESET;
1266 umac_writel(priv, reg, UMAC_CMD);
1267
1268 udelay(10);
1269
1270 reg = umac_readl(priv, UMAC_CMD);
1271 reg &= ~CMD_SW_RESET;
1272 umac_writel(priv, reg, UMAC_CMD);
1273
1274 return 0;
1275}
1280
1281static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1282 unsigned char *addr)
1283{
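	/* Address bytes 0-3 are packed into UMAC_MAC0, bytes 4-5 into UMAC_MAC1 */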
1284 umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
1285 (addr[2] << 8) | addr[3], UMAC_MAC0);
1286 umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
1287}
1288
1289static void topctrl_flush(struct bcm_sysport_priv *priv)
1290{
1291 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1292 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1293 mdelay(1);
1294 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1295 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1296}
1297
1298static int bcm_sysport_open(struct net_device *dev)
1299{
1300 struct bcm_sysport_priv *priv = netdev_priv(dev);
1301 unsigned int i;
1302 u32 reg;
1303 int ret;
1304
1305 /* Reset UniMAC */
1306 ret = umac_reset(priv);
1307 if (ret) {
1308 netdev_err(dev, "UniMAC reset failed\n");
1309 return ret;
1310 }
1311
1312 /* Flush TX and RX FIFOs at TOPCTRL level */
1313 topctrl_flush(priv);
1314
1315 /* Disable the UniMAC RX/TX */
1316 umac_enable_set(priv, 0);
1317
1318 /* Enable RBUF 4-byte alignment and Receive Status Block */
1319 reg = rbuf_readl(priv, RBUF_CONTROL);
1320 reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1321 rbuf_writel(priv, reg, RBUF_CONTROL);
1322
1323 /* Set maximum frame length */
1324 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1325
1326 /* Set MAC address */
1327 umac_set_hw_addr(priv, dev->dev_addr);
1328
1329 /* Read CRC forward */
1330 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1331
1332 priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1333 0, priv->phy_interface);
1334 if (!priv->phydev) {
1335 netdev_err(dev, "could not attach to PHY\n");
1336 return -ENODEV;
1337 }
1338
1339 /* Reset housekeeping link status */
1340 priv->old_duplex = -1;
1341 priv->old_link = -1;
1342 priv->old_pause = -1;
1343
1344 /* mask all interrupts and request them */
1345 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1346 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1347 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1348 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1349 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1350 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1351
1352 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
1353 if (ret) {
1354 netdev_err(dev, "failed to request RX interrupt\n");
1355 goto out_phy_disconnect;
1356 }
1357
1358 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
1359 if (ret) {
1360 netdev_err(dev, "failed to request TX interrupt\n");
1361 goto out_free_irq0;
1362 }
1363
1364 /* Initialize both hardware and software ring */
1365 for (i = 0; i < dev->num_tx_queues; i++) {
1366 ret = bcm_sysport_init_tx_ring(priv, i);
1367 if (ret) {
1368 netdev_err(dev, "failed to initialize TX ring %d\n",
1369 i);
1370 goto out_free_tx_ring;
1371 }
1372 }
1373
1374 /* Initialize linked-list */
1375 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
1376
1377 /* Initialize RX ring */
1378 ret = bcm_sysport_init_rx_ring(priv);
1379 if (ret) {
1380 netdev_err(dev, "failed to initialize RX ring\n");
1381 goto out_free_rx_ring;
1382 }
1383
1384 /* Turn on RDMA */
1385 ret = rdma_enable_set(priv, 1);
1386 if (ret)
1387 goto out_free_rx_ring;
1388
1389 /* Enable RX interrupt and TX ring full interrupt */
1390 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1391
1392 /* Turn on TDMA */
1393 ret = tdma_enable_set(priv, 1);
1394 if (ret)
1395 goto out_clear_rx_int;
1396
1397 /* Enable NAPI */
1398 napi_enable(&priv->napi);
1399
1400 /* Turn on UniMAC TX/RX */
1401 umac_enable_set(priv, 1);
1402
1403 phy_start(priv->phydev);
1404
1405 /* Enable TX interrupts for the 32 TXQs */
1406 intrl2_1_mask_clear(priv, 0xffffffff);
1407
1408 /* Last call before we start the real business */
1409 netif_tx_start_all_queues(dev);
1410
1411 return 0;
1412
1413out_clear_rx_int:
1414 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1415out_free_rx_ring:
1416 bcm_sysport_fini_rx_ring(priv);
1417out_free_tx_ring:
1418 for (i = 0; i < dev->num_tx_queues; i++)
1419 bcm_sysport_fini_tx_ring(priv, i);
1420 free_irq(priv->irq1, dev);
1421out_free_irq0:
1422 free_irq(priv->irq0, dev);
1423out_phy_disconnect:
1424 phy_disconnect(priv->phydev);
1425 return ret;
1426}
1427
1428static int bcm_sysport_stop(struct net_device *dev)
1429{
1430 struct bcm_sysport_priv *priv = netdev_priv(dev);
1431 unsigned int i;
1432 u32 reg;
1433 int ret;
1434
1435 /* stop all software from updating hardware */
1436 netif_tx_stop_all_queues(dev);
1437 napi_disable(&priv->napi);
1438 phy_stop(priv->phydev);
1439
1440 /* mask all interrupts */
1441 intrl2_0_mask_set(priv, 0xffffffff);
1442 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1443 intrl2_1_mask_set(priv, 0xffffffff);
1444 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1445
1446 /* Disable UniMAC RX */
1447 reg = umac_readl(priv, UMAC_CMD);
1448 reg &= ~CMD_RX_EN;
1449 umac_writel(priv, reg, UMAC_CMD);
1450
1451 ret = tdma_enable_set(priv, 0);
1452 if (ret) {
1453 netdev_err(dev, "timeout disabling TDMA\n");
1454 return ret;
1455 }
1456
1457 /* Wait for a maximum packet size to be drained */
1458 usleep_range(2000, 3000);
1459
1460 ret = rdma_enable_set(priv, 0);
1461 if (ret) {
1462 netdev_err(dev, "timeout disabling RDMA\n");
1463 return ret;
1464 }
1465
1466 /* Disable UniMAC TX */
1467 reg = umac_readl(priv, UMAC_CMD);
1468 reg &= ~CMD_TX_EN;
1469 umac_writel(priv, reg, UMAC_CMD);
1470
1471 /* Free RX/TX rings SW structures */
1472 for (i = 0; i < dev->num_tx_queues; i++)
1473 bcm_sysport_fini_tx_ring(priv, i);
1474 bcm_sysport_fini_rx_ring(priv);
1475
1476 free_irq(priv->irq0, dev);
1477 free_irq(priv->irq1, dev);
1478
1479 /* Disconnect from PHY */
1480 phy_disconnect(priv->phydev);
1481
1482 return 0;
1483}
1484
1485static const struct ethtool_ops bcm_sysport_ethtool_ops = {
1486 .get_settings = bcm_sysport_get_settings,
1487 .set_settings = bcm_sysport_set_settings,
1488 .get_drvinfo = bcm_sysport_get_drvinfo,
1489 .get_msglevel = bcm_sysport_get_msglvl,
1490 .set_msglevel = bcm_sysport_set_msglvl,
1491 .get_link = ethtool_op_get_link,
1492 .get_strings = bcm_sysport_get_strings,
1493 .get_ethtool_stats = bcm_sysport_get_stats,
1494 .get_sset_count = bcm_sysport_get_sset_count,
1495};
1496
1497static const struct net_device_ops bcm_sysport_netdev_ops = {
1498 .ndo_start_xmit = bcm_sysport_xmit,
1499 .ndo_tx_timeout = bcm_sysport_tx_timeout,
1500 .ndo_open = bcm_sysport_open,
1501 .ndo_stop = bcm_sysport_stop,
1502 .ndo_set_features = bcm_sysport_set_features,
1503 .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
1504};
1505
1506#define REV_FMT "v%2x.%02x"
1507
1508static int bcm_sysport_probe(struct platform_device *pdev)
1509{
1510 struct bcm_sysport_priv *priv;
1511 struct device_node *dn;
1512 struct net_device *dev;
1513 const void *macaddr;
1514 struct resource *r;
1515 u32 txq, rxq;
1516 int ret;
1517
1518 dn = pdev->dev.of_node;
1519 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1520
1521 /* Read the Transmit/Receive Queue properties */
1522 if (of_property_read_u32(dn, "systemport,num-txq", &txq))
1523 txq = TDMA_NUM_RINGS;
1524 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
1525 rxq = 1;
1526
1527 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
1528 if (!dev)
1529 return -ENOMEM;
1530
1531 /* Initialize private members */
1532 priv = netdev_priv(dev);
1533
1534 priv->irq0 = platform_get_irq(pdev, 0);
1535 priv->irq1 = platform_get_irq(pdev, 1);
1536 if (priv->irq0 <= 0 || priv->irq1 <= 0) {
1537 dev_err(&pdev->dev, "invalid interrupts\n");
1538 ret = -EINVAL;
1539 goto err;
1540 }
1541
1542 priv->base = devm_ioremap_resource(&pdev->dev, r);
1543 if (IS_ERR(priv->base)) {
1544 ret = PTR_ERR(priv->base);
1545 goto err;
1546 }
1547
1548 priv->netdev = dev;
1549 priv->pdev = pdev;
1550
1551 priv->phy_interface = of_get_phy_mode(dn);
1552 /* Default to GMII interface mode */
1553 if ((int)priv->phy_interface < 0)
1554 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
1555
1556 /* In the case of a fixed PHY, the DT node associated
1557 * to the PHY is the Ethernet MAC DT node.
1558 */
1559 if (of_phy_is_fixed_link(dn)) {
1560 ret = of_phy_register_fixed_link(dn);
1561 if (ret) {
1562 dev_err(&pdev->dev, "failed to register fixed PHY\n");
1563 goto err;
1564 }
1565
1566 priv->phy_dn = dn;
1567 }
1568
1569 /* Initialize netdevice members */
1570 macaddr = of_get_mac_address(dn);
1571 if (!macaddr || !is_valid_ether_addr(macaddr)) {
1572 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
1573 random_ether_addr(dev->dev_addr);
1574 } else {
1575 ether_addr_copy(dev->dev_addr, macaddr);
1576 }
1577
1578 SET_NETDEV_DEV(dev, &pdev->dev);
1579 dev_set_drvdata(&pdev->dev, dev);
1580 dev->ethtool_ops = &bcm_sysport_ethtool_ops;
1581 dev->netdev_ops = &bcm_sysport_netdev_ops;
1582 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
1583
1584 /* HW supported features, none enabled by default */
1585 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
1586 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1587
1588 /* Set the needed headroom once and for all */
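	/* (a TSB is pushed in front of every TX frame when tsb_en is set, so
	 * reserving the room up front spares an SKB reallocation in the hot
	 * path; see bcm_sysport_insert_tsb())
	 */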
1589 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
1590 dev->needed_headroom += sizeof(struct bcm_tsb);
1591
1592 /* We are interfaced to a switch which handles the multicast
1593 * filtering for us, so we do not support programming any
1594 * multicast hash table in this Ethernet MAC.
1595 */
1596 dev->flags &= ~IFF_MULTICAST;
1597
1598 /* libphy will adjust the link state accordingly */
1599 netif_carrier_off(dev);
1600
1601 ret = register_netdev(dev);
1602 if (ret) {
1603 dev_err(&pdev->dev, "failed to register net_device\n");
1604 goto err;
1605 }
1606
1607 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
1608 dev_info(&pdev->dev,
1609 "Broadcom SYSTEMPORT" REV_FMT
1610 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
1611 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
1612 priv->base, priv->irq0, priv->irq1, txq, rxq);
1613
1614 return 0;
1615err:
1616 free_netdev(dev);
1617 return ret;
1618}
1619
1620static int bcm_sysport_remove(struct platform_device *pdev)
1621{
1622 struct net_device *dev = dev_get_drvdata(&pdev->dev);
1623
1624 /* Not much to do, ndo_close has been called
1625 * and we use managed allocations
1626 */
1627 unregister_netdev(dev);
1628 free_netdev(dev);
1629 dev_set_drvdata(&pdev->dev, NULL);
1630
1631 return 0;
1632}
1633
1634static const struct of_device_id bcm_sysport_of_match[] = {
1635 { .compatible = "brcm,systemport-v1.00" },
1636 { .compatible = "brcm,systemport" },
1637 { /* sentinel */ }
1638};
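/* Export the OF match table so the module can be autoloaded */
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);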
1639
1640static struct platform_driver bcm_sysport_driver = {
1641 .probe = bcm_sysport_probe,
1642 .remove = bcm_sysport_remove,
1643 .driver = {
1644 .name = "brcm-systemport",
1645 .owner = THIS_MODULE,
1646 .of_match_table = bcm_sysport_of_match,
1647 },
1648};
1649module_platform_driver(bcm_sysport_driver);
1650
1651MODULE_AUTHOR("Broadcom Corporation");
1652MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
1653MODULE_ALIAS("platform:brcm-systemport");
1654MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 000000000000..281c08246037
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,678 @@
1/*
2 * Broadcom BCM7xxx System Port Ethernet MAC driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __BCM_SYSPORT_H
12#define __BCM_SYSPORT_H
13
14#include <linux/if_vlan.h>
15
16/* Receive/transmit descriptor format */
17#define DESC_ADDR_HI_STATUS_LEN 0x00
18#define DESC_ADDR_HI_SHIFT 0
19#define DESC_ADDR_HI_MASK 0xff
20#define DESC_STATUS_SHIFT 8
21#define DESC_STATUS_MASK 0x3ff
22#define DESC_LEN_SHIFT 18
23#define DESC_LEN_MASK 0x7fff
24#define DESC_ADDR_LO 0x04
25
26/* HW supports 40-bit addressing, hence each descriptor is two 32-bit words */
27#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
28
29/* Default RX buffer allocation size */
30#define RX_BUF_LENGTH 2048
31
32/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526.
33 * 1536 is multiple of 256 bytes
34 */
35#define ENET_BRCM_TAG_LEN 4
36#define ENET_PAD 10
37#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
38 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
39
40/* Transmit status block, prepended to each frame by software and consumed by the hardware */
41struct bcm_tsb {
42 u32 pcp_dei_vid;
43#define PCP_DEI_MASK 0xf
44#define VID_SHIFT 4
45#define VID_MASK 0xfff
46 u32 l4_ptr_dest_map;
47#define L4_CSUM_PTR_MASK 0x1ff
48#define L4_PTR_SHIFT 9
49#define L4_PTR_MASK 0x1ff
50#define L4_UDP (1 << 18)
51#define L4_LENGTH_VALID (1 << 19)
52#define DEST_MAP_SHIFT 20
53#define DEST_MAP_MASK 0x1ff
54};
55
56/* Receive status block uses the same
57 * definitions as the DMA descriptor
58 */
59struct bcm_rsb {
60 u32 rx_status_len;
61 u32 brcm_egress_tag;
62};
63
64/* Common Receive/Transmit status bits */
65#define DESC_L4_CSUM (1 << 7)
66#define DESC_SOP (1 << 8)
67#define DESC_EOP (1 << 9)
68
69/* Receive Status bits */
70#define RX_STATUS_UCAST 0
71#define RX_STATUS_BCAST 0x04
72#define RX_STATUS_MCAST 0x08
73#define RX_STATUS_L2_MCAST 0x0c
74#define RX_STATUS_ERR (1 << 4)
75#define RX_STATUS_OVFLOW (1 << 5)
76#define RX_STATUS_PARSE_FAIL (1 << 6)
77
78/* Transmit Status bits */
79#define TX_STATUS_VLAN_NO_ACT 0x00
80#define TX_STATUS_VLAN_PCP_TSB 0x01
81#define TX_STATUS_VLAN_QUEUE 0x02
82#define TX_STATUS_VLAN_VID_TSB 0x03
83#define TX_STATUS_OWR_CRC (1 << 2)
84#define TX_STATUS_APP_CRC (1 << 3)
85#define TX_STATUS_BRCM_TAG_NO_ACT 0
86#define TX_STATUS_BRCM_TAG_ZERO 0x10
87#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
88#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
89#define TX_STATUS_SKIP_BYTES (1 << 6)
90
91/* Specific register definitions */
92#define SYS_PORT_TOPCTRL_OFFSET 0
93#define REV_CNTL 0x00
94#define REV_MASK 0xffff
95
96#define RX_FLUSH_CNTL 0x04
97#define RX_FLUSH (1 << 0)
98
99#define TX_FLUSH_CNTL 0x08
100#define TX_FLUSH (1 << 0)
101
102#define MISC_CNTL 0x0c
103#define SYS_CLK_SEL (1 << 0)
104#define TDMA_EOP_SEL (1 << 1)
105
106/* Level-2 Interrupt controller offsets and defines */
107#define SYS_PORT_INTRL2_0_OFFSET 0x200
108#define SYS_PORT_INTRL2_1_OFFSET 0x240
109#define INTRL2_CPU_STATUS 0x00
110#define INTRL2_CPU_SET 0x04
111#define INTRL2_CPU_CLEAR 0x08
112#define INTRL2_CPU_MASK_STATUS 0x0c
113#define INTRL2_CPU_MASK_SET 0x10
114#define INTRL2_CPU_MASK_CLEAR 0x14
115
116/* Level-2 instance 0 interrupt bits */
117#define INTRL2_0_GISB_ERR (1 << 0)
118#define INTRL2_0_RBUF_OVFLOW (1 << 1)
119#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
120#define INTRL2_0_MPD (1 << 3)
121#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
122#define INTRL2_0_RDMA_MBDONE (1 << 5)
123#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
124#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
125#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
126#define INTRL2_0_TX_RING_FULL (1 << 9)
127#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
128#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
129
130/* RXCHK offset and defines */
131#define SYS_PORT_RXCHK_OFFSET 0x300
132
133#define RXCHK_CONTROL 0x00
134#define RXCHK_EN (1 << 0)
135#define RXCHK_SKIP_FCS (1 << 1)
136#define RXCHK_BAD_CSUM_DIS (1 << 2)
137#define RXCHK_BRCM_TAG_EN (1 << 3)
138#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
139#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
140#define RXCHK_PARSE_TNL (1 << 12)
141#define RXCHK_VIOL_EN (1 << 13)
142#define RXCHK_VIOL_DIS (1 << 14)
143#define RXCHK_INCOM_PKT (1 << 15)
144#define RXCHK_V6_DUPEXT_EN (1 << 16)
145#define RXCHK_V6_DUPEXT_DIS (1 << 17)
146#define RXCHK_ETHERTYPE_DIS (1 << 18)
147#define RXCHK_L2_HDR_DIS (1 << 19)
148#define RXCHK_L3_HDR_DIS (1 << 20)
149#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
150#define RXCHK_PARSE_AUTH (1 << 22)
151
152#define RXCHK_BRCM_TAG0 0x04
153#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
154#define RXCHK_BRCM_TAG0_MASK 0x24
155#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
156#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
157#define RXCHK_ETHERTYPE 0x48
158#define RXCHK_BAD_CSUM_CNTR 0x4C
159#define RXCHK_OTHER_DISC_CNTR 0x50
160
161/* TXCHCK offsets and defines */
162#define SYS_PORT_TXCHK_OFFSET 0x380
163#define TXCHK_PKT_RDY_THRESH 0x00
164
165/* Receive buffer offset and defines */
166#define SYS_PORT_RBUF_OFFSET 0x400
167
168#define RBUF_CONTROL 0x00
169#define RBUF_RSB_EN (1 << 0)
170#define RBUF_4B_ALGN (1 << 1)
171#define RBUF_BRCM_TAG_STRIP (1 << 2)
172#define RBUF_BAD_PKT_DISC (1 << 3)
173#define RBUF_RESUME_THRESH_SHIFT 4
174#define RBUF_RESUME_THRESH_MASK 0xff
175#define RBUF_OK_TO_SEND_SHIFT 12
176#define RBUF_OK_TO_SEND_MASK 0xff
177#define RBUF_CRC_REPLACE (1 << 20)
178#define RBUF_OK_TO_SEND_MODE (1 << 21)
179#define RBUF_RSB_SWAP (1 << 22)
180#define RBUF_ACPI_EN (1 << 23)
181
182#define RBUF_PKT_RDY_THRESH 0x04
183
184#define RBUF_STATUS 0x08
185#define RBUF_WOL_MODE (1 << 0)
186#define RBUF_MPD (1 << 1)
187#define RBUF_ACPI (1 << 2)
188
189#define RBUF_OVFL_DISC_CNTR 0x0c
190#define RBUF_ERR_PKT_CNTR 0x10
191
192/* Transmit buffer offset and defines */
193#define SYS_PORT_TBUF_OFFSET 0x600
194
195#define TBUF_CONTROL 0x00
196#define TBUF_BP_EN (1 << 0)
197#define TBUF_MAX_PKT_THRESH_SHIFT 1
198#define TBUF_MAX_PKT_THRESH_MASK 0x1f
199#define TBUF_FULL_THRESH_SHIFT 8
200#define TBUF_FULL_THRESH_MASK 0x1f
201
202/* UniMAC offset and defines */
203#define SYS_PORT_UMAC_OFFSET 0x800
204
205#define UMAC_CMD 0x008
206#define CMD_TX_EN (1 << 0)
207#define CMD_RX_EN (1 << 1)
208#define CMD_SPEED_SHIFT 2
209#define CMD_SPEED_10 0
210#define CMD_SPEED_100 1
211#define CMD_SPEED_1000 2
212#define CMD_SPEED_2500 3
213#define CMD_SPEED_MASK 3
214#define CMD_PROMISC (1 << 4)
215#define CMD_PAD_EN (1 << 5)
216#define CMD_CRC_FWD (1 << 6)
217#define CMD_PAUSE_FWD (1 << 7)
218#define CMD_RX_PAUSE_IGNORE (1 << 8)
219#define CMD_TX_ADDR_INS (1 << 9)
220#define CMD_HD_EN (1 << 10)
221#define CMD_SW_RESET (1 << 13)
222#define CMD_LCL_LOOP_EN (1 << 15)
223#define CMD_AUTO_CONFIG (1 << 22)
224#define CMD_CNTL_FRM_EN (1 << 23)
225#define CMD_NO_LEN_CHK (1 << 24)
226#define CMD_RMT_LOOP_EN (1 << 25)
227#define CMD_PRBL_EN (1 << 27)
228#define CMD_TX_PAUSE_IGNORE (1 << 28)
229#define CMD_TX_RX_EN (1 << 29)
230#define CMD_RUNT_FILTER_DIS (1 << 30)
231
232#define UMAC_MAC0 0x00c
233#define UMAC_MAC1 0x010
234#define UMAC_MAX_FRAME_LEN 0x014
235
236#define UMAC_TX_FLUSH 0x334
237
238#define UMAC_MIB_START 0x400
239
240/* There is a 0xC gap between the end of the RX and the beginning of the TX
241 * stats, and again between the end of the TX stats and the RX RUNT stats
242 */
243#define UMAC_MIB_STAT_OFFSET 0xc
244
245#define UMAC_MIB_CTRL 0x580
246#define MIB_RX_CNT_RST (1 << 0)
247#define MIB_RUNT_CNT_RST (1 << 1)
248#define MIB_TX_CNT_RST (1 << 2)
249#define UMAC_MDF_CTRL 0x650
250#define UMAC_MDF_ADDR 0x654
251
252/* Receive DMA offset and defines */
253#define SYS_PORT_RDMA_OFFSET 0x2000
254
255#define RDMA_CONTROL 0x1000
256#define RDMA_EN (1 << 0)
257#define RDMA_RING_CFG (1 << 1)
258#define RDMA_DISC_EN (1 << 2)
259#define RDMA_BUF_DATA_OFFSET_SHIFT 4
260#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
261
262#define RDMA_STATUS 0x1004
263#define RDMA_DISABLED (1 << 0)
264#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
265#define RDMA_BP_STATUS (1 << 2)
266
267#define RDMA_SCB_BURST_SIZE 0x1008
268
269#define RDMA_RING_BUF_SIZE 0x100c
270#define RDMA_RING_SIZE_SHIFT 16
271
272#define RDMA_WRITE_PTR_HI 0x1010
273#define RDMA_WRITE_PTR_LO 0x1014
274#define RDMA_PROD_INDEX 0x1018
275#define RDMA_PROD_INDEX_MASK 0xffff
276
277#define RDMA_CONS_INDEX 0x101c
278#define RDMA_CONS_INDEX_MASK 0xffff
279
280#define RDMA_START_ADDR_HI 0x1020
281#define RDMA_START_ADDR_LO 0x1024
282#define RDMA_END_ADDR_HI 0x1028
283#define RDMA_END_ADDR_LO 0x102c
284
285#define RDMA_MBDONE_INTR 0x1030
286#define RDMA_INTR_THRESH_MASK 0xff
287#define RDMA_TIMEOUT_SHIFT 16
288#define RDMA_TIMEOUT_MASK 0xffff
289
290#define RDMA_XON_XOFF_THRESH 0x1034
291#define RDMA_XON_XOFF_THRESH_MASK 0xffff
292#define RDMA_XOFF_THRESH_SHIFT 16
293
294#define RDMA_READ_PTR_HI 0x1038
295#define RDMA_READ_PTR_LO 0x103c
296
297#define RDMA_OVERRIDE 0x1040
298#define RDMA_LE_MODE (1 << 0)
299#define RDMA_REG_MODE (1 << 1)
300
301#define RDMA_TEST 0x1044
302#define RDMA_TP_OUT_SEL (1 << 0)
303#define RDMA_MEM_SEL (1 << 1)
304
305#define RDMA_DEBUG 0x1048
306
307/* Transmit DMA offset and defines */
308#define TDMA_NUM_RINGS 32 /* rings = queues */
309#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bit words */
310
311#define SYS_PORT_TDMA_OFFSET 0x4000
312#define TDMA_WRITE_PORT_OFFSET 0x0000
313#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
314 (i) * TDMA_PORT_SIZE)
315#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
316 sizeof(u32) + (i) * TDMA_PORT_SIZE)
317
318#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
319 (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
320#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
321 (i) * TDMA_PORT_SIZE)
322#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
323 sizeof(u32) + (i) * TDMA_PORT_SIZE)
324
325#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
326 (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
327#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
328 (i) * sizeof(u32))
329
330#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
331 (TDMA_NUM_RINGS * sizeof(u32)))
332
333/* Register offsets and defines relative to a specific ring number */
334#define RING_HEAD_TAIL_PTR 0x00
335#define RING_HEAD_MASK 0x7ff
336#define RING_TAIL_SHIFT 11
337#define RING_TAIL_MASK 0x7ff
338#define RING_FLUSH (1 << 24)
339#define RING_EN (1 << 25)
340
341#define RING_COUNT 0x04
342#define RING_COUNT_MASK 0x7ff
343#define RING_BUFF_DONE_SHIFT 11
344#define RING_BUFF_DONE_MASK 0x7ff
345
346#define RING_MAX_HYST 0x08
347#define RING_MAX_THRESH_MASK 0x7ff
348#define RING_HYST_THRESH_SHIFT 11
349#define RING_HYST_THRESH_MASK 0x7ff
350
351#define RING_INTR_CONTROL 0x0c
352#define RING_INTR_THRESH_MASK 0x7ff
353#define RING_EMPTY_INTR_EN (1 << 15)
354#define RING_TIMEOUT_SHIFT 16
355#define RING_TIMEOUT_MASK 0xffff
356
357#define RING_PROD_CONS_INDEX 0x10
358#define RING_PROD_INDEX_MASK 0xffff
359#define RING_CONS_INDEX_SHIFT 16
360#define RING_CONS_INDEX_MASK 0xffff
361
362#define RING_MAPPING 0x14
363#define RING_QID_MASK 0x3
364#define RING_PORT_ID_SHIFT 3
365#define RING_PORT_ID_MASK 0x7
366#define RING_IGNORE_STATUS (1 << 6)
367#define RING_FAILOVER_EN (1 << 7)
368#define RING_CREDIT_SHIFT 8
369#define RING_CREDIT_MASK 0xffff
370
371#define RING_PCP_DEI_VID 0x18
372#define RING_VID_MASK 0x7ff
373#define RING_DEI (1 << 12)
374#define RING_PCP_SHIFT 13
375#define RING_PCP_MASK 0x7
376#define RING_PKT_SIZE_ADJ_SHIFT 16
377#define RING_PKT_SIZE_ADJ_MASK 0xf
378
379#define TDMA_DESC_RING_SIZE 28
380
381/* Definition of a given TX ring base address */
382#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
383 ((i) * TDMA_DESC_RING_SIZE))
384
385/* Ring-indexed register addresses */
386#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
387 RING_HEAD_TAIL_PTR)
388#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
389 RING_COUNT)
390#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
391 RING_MAX_HYST)
392#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
393 RING_INTR_CONTROL)
394#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
395 (TDMA_DESC_RING_BASE(i) + \
396 RING_PROD_CONS_INDEX)
397#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
398 RING_MAPPING)
399#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
400 RING_PCP_DEI_VID)
401
402#define TDMA_CONTROL 0x600
403#define TDMA_EN (1 << 0)
404#define TSB_EN (1 << 1)
405#define TSB_SWAP (1 << 2)
406#define ACB_ALGO (1 << 3)
407#define BUF_DATA_OFFSET_SHIFT 4
408#define BUF_DATA_OFFSET_MASK 0x3ff
409#define VLAN_EN (1 << 14)
410#define SW_BRCM_TAG (1 << 15)
411#define WNC_KPT_SIZE_UPDATE (1 << 16)
412#define SYNC_PKT_SIZE (1 << 17)
413#define ACH_TXDONE_DELAY_SHIFT 18
414#define ACH_TXDONE_DELAY_MASK 0xff
415
416#define TDMA_STATUS 0x604
417#define TDMA_DISABLED (1 << 0)
418#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
419
420#define TDMA_SCB_BURST_SIZE 0x608
421#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
422#define TDMA_OVER_HYST_THRESH_STATUS 0x610
423#define TDMA_TPID 0x614
424
425#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
426#define TDMA_FREE_HEAD_MASK 0x7ff
427#define TDMA_FREE_TAIL_SHIFT 11
428#define TDMA_FREE_TAIL_MASK 0x7ff
429
430#define TDMA_FREE_LIST_COUNT 0x61c
431#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
432
433#define TDMA_TIER2_ARB_CTRL 0x620
434#define TDMA_ARB_MODE_RR 0
435#define TDMA_ARB_MODE_WEIGHT_RR 0x1
436#define TDMA_ARB_MODE_STRICT 0x2
437#define TDMA_ARB_MODE_DEFICIT_RR 0x3
438#define TDMA_CREDIT_SHIFT 4
439#define TDMA_CREDIT_MASK 0xffff
440
441#define TDMA_TIER1_ARB_0_CTRL 0x624
442#define TDMA_ARB_EN (1 << 0)
443
444#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
445#define TDMA_TIER1_ARB_1_CTRL 0x62c
446#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
447#define TDMA_TIER1_ARB_2_CTRL 0x634
448#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
449#define TDMA_TIER1_ARB_3_CTRL 0x63c
450#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
451
452#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
453#define TDMA_LE_MODE (1 << 0)
454#define TDMA_REG_MODE (1 << 1)
455
456#define TDMA_TEST 0x648
457#define TDMA_TP_OUT_SEL (1 << 0)
458#define TDMA_MEM_TM (1 << 1)
459
460#define TDMA_DEBUG 0x64c
461
462/* Transmit/Receive descriptor */
463struct dma_desc {
464 u32 addr_status_len;
465 u32 addr_lo;
466};
467
468/* Number of Receive hardware descriptor words */
469#define NUM_HW_RX_DESC_WORDS 1024
470/* Real number of usable descriptors */
471#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
472
473/* Internal linked-list RAM has up to 1536 entries */
474#define NUM_TX_DESC 1536
475
476#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
477
478/* Rx/Tx common counter group */
479struct bcm_sysport_pkt_counters {
480 u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
481 u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
482 u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
483 u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
484 u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
485 u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
486 u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
487 u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet */
488 u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet */
489 u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet */
490};
491
492/* RSV, Receive Status Vector */
493struct bcm_sysport_rx_counters {
494 struct bcm_sysport_pkt_counters pkt_cnt;
495 u32 pkt; /* RO (0x428) Received pkt count*/
496 u32 bytes; /* RO Received byte count */
497 u32 mca; /* RO # of Received multicast pkt */
498 u32 bca; /* RO # of Receive broadcast pkt */
499 u32 fcs; /* RO # of Received FCS error */
500 u32 cf; /* RO # of Received control frame pkt*/
501 u32 pf; /* RO # of Received pause frame pkt */
502 u32 uo; /* RO # of unknown op code pkt */
503 u32 aln; /* RO # of alignment error count */
504 u32 flr; /* RO # of frame length out of range count */
505 u32 cde; /* RO # of code error pkt */
506 u32 fcr; /* RO # of carrier sense error pkt */
507 u32 ovr; /* RO # of oversize pkt*/
508 u32 jbr; /* RO # of jabber count */
509 u32 mtue; /* RO # of MTU error pkt*/
510 u32 pok; /* RO # of Received good pkt */
511 u32 uc; /* RO # of unicast pkt */
512 u32 ppp; /* RO # of PPP pkt */
513 u32 rcrc; /* RO (0x470),# of CRC match pkt */
514};
515
516/* TSV, Transmit Status Vector */
517struct bcm_sysport_tx_counters {
518 struct bcm_sysport_pkt_counters pkt_cnt;
519 u32 pkts; /* RO (0x4a8) Transmitted pkt */
520 u32 mca; /* RO # of xmited multicast pkt */
521 u32 bca; /* RO # of xmited broadcast pkt */
522 u32 pf; /* RO # of xmited pause frame count */
523 u32 cf; /* RO # of xmited control frame count */
524 u32 fcs; /* RO # of xmited FCS error count */
525 u32 ovr; /* RO # of xmited oversize pkt */
526 u32 drf; /* RO # of xmited deferral pkt */
527 u32 edf; /* RO # of xmited Excessive deferral pkt*/
528 u32 scl; /* RO # of xmited single collision pkt */
529 u32 mcl; /* RO # of xmited multiple collision pkt*/
530 u32 lcl; /* RO # of xmited late collision pkt */
531 u32 ecl; /* RO # of xmited excessive collision pkt*/
532 u32 frg; /* RO # of xmited fragments pkt*/
533 u32 ncl; /* RO # of xmited total collision count */
534 u32 jbr; /* RO # of xmited jabber count*/
535 u32 bytes; /* RO # of xmited byte count */
536 u32 pok; /* RO # of xmited good pkt */
537 u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
538};
539
540struct bcm_sysport_mib {
541 struct bcm_sysport_rx_counters rx;
542 struct bcm_sysport_tx_counters tx;
543 u32 rx_runt_cnt;
544 u32 rx_runt_fcs;
545 u32 rx_runt_fcs_align;
546 u32 rx_runt_bytes;
547 u32 rxchk_bad_csum;
548 u32 rxchk_other_pkt_disc;
549 u32 rbuf_ovflow_cnt;
550 u32 rbuf_err_cnt;
551};
552
553/* HW maintains a large list of counters */
554enum bcm_sysport_stat_type {
555 BCM_SYSPORT_STAT_NETDEV = -1,
556 BCM_SYSPORT_STAT_MIB_RX,
557 BCM_SYSPORT_STAT_MIB_TX,
558 BCM_SYSPORT_STAT_RUNT,
559 BCM_SYSPORT_STAT_RXCHK,
560 BCM_SYSPORT_STAT_RBUF,
561};
562
563/* Macros to help define ethtool statistics */
564#define STAT_NETDEV(m) { \
565 .stat_string = __stringify(m), \
566 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
567 .stat_offset = offsetof(struct net_device_stats, m), \
568 .type = BCM_SYSPORT_STAT_NETDEV, \
569}
570
571#define STAT_MIB(str, m, _type) { \
572 .stat_string = str, \
573 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
574 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
575 .type = _type, \
576}
577
578#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
579#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
580#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
581
582#define STAT_RXCHK(str, m, ofs) { \
583 .stat_string = str, \
584 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
585 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
586 .type = BCM_SYSPORT_STAT_RXCHK, \
587 .reg_offset = ofs, \
588}
589
590#define STAT_RBUF(str, m, ofs) { \
591 .stat_string = str, \
592 .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
593 .stat_offset = offsetof(struct bcm_sysport_priv, m), \
594 .type = BCM_SYSPORT_STAT_RBUF, \
595 .reg_offset = ofs, \
596}
597
598struct bcm_sysport_stats {
599 char stat_string[ETH_GSTRING_LEN];
600 int stat_sizeof;
601 int stat_offset;
602 enum bcm_sysport_stat_type type;
603 /* reg offset from UMAC base for misc counters */
604 u16 reg_offset;
605};
606
607/* Software housekeeping helper structure */
608struct bcm_sysport_cb {
609 struct sk_buff *skb; /* SKB for RX packets */
610 void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
611
612 DEFINE_DMA_UNMAP_ADDR(dma_addr);
613 DEFINE_DMA_UNMAP_LEN(dma_len);
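	/* (the two fields above compile away on architectures that do not
	 * need DMA unmap state; see DEFINE_DMA_UNMAP_ADDR in
	 * <linux/dma-mapping.h>)
	 */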
614};
615
616/* Software view of the TX ring */
617struct bcm_sysport_tx_ring {
618 spinlock_t lock; /* Ring lock for tx reclaim/xmit */
619 struct napi_struct napi; /* NAPI per tx queue */
620 dma_addr_t desc_dma; /* DMA cookie */
621 unsigned int index; /* Ring index */
622 unsigned int size; /* Ring current size */
623 unsigned int alloc_size; /* Ring one-time allocated size */
624 unsigned int desc_count; /* Number of descriptors */
625 unsigned int curr_desc; /* Current descriptor */
626 unsigned int c_index; /* Last consumer index */
627 unsigned int p_index; /* Current producer index */
628 struct bcm_sysport_cb *cbs; /* Transmit control blocks */
629 struct dma_desc *desc_cpu; /* CPU view of the descriptor */
630 struct bcm_sysport_priv *priv; /* private context backpointer */
631};
632
633/* Driver private structure */
634struct bcm_sysport_priv {
635 void __iomem *base;
636 u32 irq0_stat;
637 u32 irq0_mask;
638 u32 irq1_stat;
639 u32 irq1_mask;
640 struct napi_struct napi ____cacheline_aligned;
641 struct net_device *netdev;
642 struct platform_device *pdev;
643 int irq0;
644 int irq1;
645
646 /* Transmit rings */
647 struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
648
649 /* Receive queue */
650 void __iomem *rx_bds;
651 void __iomem *rx_bd_assign_ptr;
652 unsigned int rx_bd_assign_index;
653 struct bcm_sysport_cb *rx_cbs;
654 unsigned int num_rx_bds;
655 unsigned int rx_read_ptr;
656 unsigned int rx_c_index;
657
658 /* PHY device */
659 struct device_node *phy_dn;
660 struct phy_device *phydev;
661 phy_interface_t phy_interface;
662 int old_pause;
663 int old_link;
664 int old_duplex;
665
666 /* Misc fields */
667 unsigned int rx_csum_en:1;
668 unsigned int tsb_en:1;
669 unsigned int crc_fwd:1;
670 u16 rev;
671
672 /* MIB related fields */
673 struct bcm_sysport_mib mib;
674
675 /* Ethtool */
676 u32 msg_enable;
677};
678#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0297a79a38e1..05c6af6c418f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
 		return -ENOMEM;
 	net_dev->netdev_ops = &bgmac_netdev_ops;
 	net_dev->irq = core->irq;
-	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+	net_dev->ethtool_ops = &bgmac_ethtool_ops;
 	bgmac = netdev_priv(net_dev);
 	bgmac->net_dev = net_dev;
 	bgmac->core = core;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0ab83708b6a1..67d2b0047371 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6916,8 +6916,8 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		}
 	}
 	else {
-		ethtool_cmd_speed_set(cmd, -1);
-		cmd->duplex = -1;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
 	}
 	spin_unlock_bh(&bp->phy_lock);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4d8f8aba0ea5..4cab09d3f807 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dd57c7c5a3da..47c5814114e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
 
+		/* A rmb() is required to ensure that the CQE is not read
+		 * before it is written by the adapter DMA. PCI ordering
+		 * rules will make sure the other fields are written before
+		 * the marker at the end of struct eth_fast_path_rx_cqe
+		 * but without rmb() a weakly ordered processor can process
+		 * stale data. Without the barrier the TPA state-machine might
+		 * enter an inconsistent state and the kernel stack might be
+		 * provided with an incorrect packet description - these lead
+		 * to various kernel crashes.
+		 */
+		rmb();
+
 		cqe_fp_flags = cqe_fp->type_error_flags;
 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3448cc033ca5..571427c7226b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 97ea5421dd96..51a952c51cb1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Dmitry Kravkov
  *
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 804b8f64463e..c6939ecb02c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Dmitry Kravkov
  *
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index b6de05e3149b..bd0600cf7266 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
@@ -3316,7 +3316,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
 	return T_ETH_INDIRECTION_TABLE_SIZE;
 }
 
-static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -3340,14 +3340,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
 	return 0;
 }
 
-static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *key)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t i;
 
 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
 		/*
-		 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+		 * The same as in bnx2x_get_rxfh: we can't use a memcpy()
 		 * as an internal storage of an indirection table is a u8 array
 		 * while indir->ring_index points to an array of u32.
 		 *
@@ -3471,8 +3472,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_rxnfc		= bnx2x_get_rxnfc,
 	.set_rxnfc		= bnx2x_set_rxnfc,
 	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
-	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
-	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
+	.get_rxfh		= bnx2x_get_rxfh,
+	.set_rxfh		= bnx2x_set_rxfh,
 	.get_channels		= bnx2x_get_channels,
 	.set_channels		= bnx2x_set_channels,
 	.get_module_info	= bnx2x_get_module_info,
@@ -3498,16 +3499,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
 	.get_rxnfc		= bnx2x_get_rxnfc,
 	.set_rxnfc		= bnx2x_set_rxnfc,
 	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
-	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
-	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
+	.get_rxfh		= bnx2x_get_rxfh,
+	.set_rxfh		= bnx2x_set_rxfh,
 	.get_channels		= bnx2x_get_channels,
 	.set_channels		= bnx2x_set_channels,
 };
 
 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
 {
-	if (IS_PF(bp))
-		SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
-	else /* vf */
-		SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+	netdev->ethtool_ops = (IS_PF(bp)) ?
+		&bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f572ae164fce..8aafd9b5d6a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -6,8 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
  * Based on the original idea of John Wright <john.wright@hp.com>.
  */
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c2dfea7968f4..bd90e50bd8e6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -7,9 +7,9 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
  */
 
 #ifndef BNX2X_INIT_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 8ab0dd900960..5669ed2e87d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -8,8 +8,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
  */
 
 #ifndef BNX2X_INIT_OPS_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9b6b3d7304b6..53fb4fa61b40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2218,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
2218 */ 2218 */
2219 u32 val; 2219 u32 val;
2220 struct bnx2x *bp = params->bp; 2220 struct bnx2x *bp = params->bp;
2221 int bnx2x_status = 0;
2222 u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); 2221 u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
2223 2222
2224 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) 2223 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2232,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
2232 bnx2x_update_pfc_nig(params, vars, pfc_params); 2231 bnx2x_update_pfc_nig(params, vars, pfc_params);
2233 2232
2234 if (!vars->link_up) 2233 if (!vars->link_up)
2235 return bnx2x_status; 2234 return 0;
2236 2235
2237 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); 2236 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
2238 2237
@@ -2246,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
2246 == 0) { 2245 == 0) {
2247 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); 2246 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
2248 bnx2x_emac_enable(params, vars, 0); 2247 bnx2x_emac_enable(params, vars, 0);
2249 return bnx2x_status; 2248 return 0;
2250 } 2249 }
2251 if (CHIP_IS_E2(bp)) 2250 if (CHIP_IS_E2(bp))
2252 bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); 2251 bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2260,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
2260 val = 1; 2259 val = 1;
2261 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); 2260 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
2262 } 2261 }
2263 return bnx2x_status; 2262 return 0;
2264} 2263}
2265 2264
2266static int bnx2x_bmac1_enable(struct link_params *params, 2265static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3703,7 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
3703static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3702static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3704 struct link_params *params, 3703 struct link_params *params,
3705 struct link_vars *vars) { 3704 struct link_vars *vars) {
3706 u16 lane, i, cl72_ctrl, an_adv = 0; 3705 u16 lane, i, cl72_ctrl, an_adv = 0, val;
3706 u32 wc_lane_config;
3707 struct bnx2x *bp = params->bp; 3707 struct bnx2x *bp = params->bp;
3708 static struct bnx2x_reg_set reg_set[] = { 3708 static struct bnx2x_reg_set reg_set[] = {
3709 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3709 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3822,15 +3822,27 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3822 /* Enable Auto-Detect to support 1G over CL37 as well */ 3822 /* Enable Auto-Detect to support 1G over CL37 as well */
3823 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3823 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3824 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); 3824 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
3825 3825 wc_lane_config = REG_RD(bp, params->shmem_base +
3826 offsetof(struct shmem_region, dev_info.
3827 shared_hw_config.wc_lane_config));
3828 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3829 MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
3826 /* Force cl48 sync_status LOW to avoid getting stuck in CL73 3830 /* Force cl48 sync_status LOW to avoid getting stuck in CL73
3827 * parallel-detect loop when CL73 and CL37 are enabled. 3831 * parallel-detect loop when CL73 and CL37 are enabled.
3828 */ 3832 */
3829 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 3833 val |= 1 << 11;
3830 MDIO_AER_BLOCK_AER_REG, 0); 3834
3835 /* Restore Polarity settings in case it was run over by
3836 * previous link owner
3837 */
3838 if (wc_lane_config &
3839 (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
3840 val |= 3 << 2;
3841 else
3842 val &= ~(3 << 2);
3831 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3843 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3832 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); 3844 MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
3833 bnx2x_set_aer_mmd(params, phy); 3845 val);
3834 3846
3835 bnx2x_disable_kr2(params, vars, phy); 3847 bnx2x_disable_kr2(params, vars, phy);
3836 } 3848 }
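The polarity fix above is a classic read-modify-write: the lane's RX control register is read, bit 11 is forced on (keeping cl48 sync_status low), and bits 3:2 are set or cleared from the shared-memory lane configuration. A compact user-space model of that masking, with hypothetical names for the bit fields:

#include <stdint.h>
#include <stdio.h>

#define FORCE_SYNC_LOW  (1u << 11)     /* avoid the CL73 parallel-detect loop */
#define RX_POL_FLIP     (3u << 2)      /* RX polarity-flip field, bits 3:2 */

static uint16_t fixup_rx_ctrl(uint16_t val, int pol_flip_enabled)
{
        val |= FORCE_SYNC_LOW;
        if (pol_flip_enabled)
                val |= RX_POL_FLIP;    /* restore flipped polarity */
        else
                val &= ~RX_POL_FLIP;   /* previous link owner may have set it */
        return val;
}

int main(void)
{
        /* 0x000c has the flip bits set; with flip disabled they are cleared */
        printf("0x%04x\n", (unsigned)fixup_rx_ctrl(0x000c, 0));  /* 0x0800 */
        return 0;
}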
@@ -6473,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
6473static int bnx2x_link_initialize(struct link_params *params, 6485static int bnx2x_link_initialize(struct link_params *params,
6474 struct link_vars *vars) 6486 struct link_vars *vars)
6475{ 6487{
6476 int rc = 0;
6477 u8 phy_index, non_ext_phy; 6488 u8 phy_index, non_ext_phy;
6478 struct bnx2x *bp = params->bp; 6489 struct bnx2x *bp = params->bp;
6479 /* In case of external phy existence, the line speed would be the 6490 /* In case of external phy existence, the line speed would be the
@@ -6546,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
6546 NIG_STATUS_XGXS0_LINK_STATUS | 6557 NIG_STATUS_XGXS0_LINK_STATUS |
6547 NIG_STATUS_SERDES0_LINK_STATUS | 6558 NIG_STATUS_SERDES0_LINK_STATUS |
6548 NIG_MASK_MI_INT)); 6559 NIG_MASK_MI_INT));
6549 return rc; 6560 return 0;
6550} 6561}
6551 6562
6552static void bnx2x_int_link_reset(struct bnx2x_phy *phy, 6563static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -12461,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
12461 u32 dont_clear_stat, lfa_sts; 12472 u32 dont_clear_stat, lfa_sts;
12462 struct bnx2x *bp = params->bp; 12473 struct bnx2x *bp = params->bp;
12463 12474
12475 bnx2x_set_mdio_emac_per_phy(bp, params);
12464 /* Sync the link parameters */ 12476 /* Sync the link parameters */
12465 bnx2x_link_status_update(params, vars); 12477 bnx2x_link_status_update(params, vars);
12466 12478
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3a8e51ed5bec..2887034523e0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
@@ -10053,6 +10053,24 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10053#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10053#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) 10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) 10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
10056
10057static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10058{
10059 /* UNDI marks its presence in DORQ -
10060 * it initializes CID offset for normal bell to 0x7
10061 */
10062 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10063 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10064 return false;
10065
10066 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10067 BNX2X_DEV_INFO("UNDI previously loaded\n");
10068 return true;
10069 }
10070
10071 return false;
10072}
10073
10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10074static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
10057{ 10075{
10058 u8 major, minor, version; 10076 u8 major, minor, version;
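bnx2x_prev_is_after_undi() factors a two-step probe out of the unload paths: the DORQ block must be out of reset before its registers mean anything, and only then is the normal-bell CID offset compared against the value UNDI leaves behind. A sketch of that shape with a stubbed register accessor (the offsets and bit position below are illustrative, not the chip's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RST_DORQ_BIT   (1u << 19)      /* illustrative bit position */
#define UNDI_CID_OFST  0x7             /* value UNDI writes for normal bell */

static uint32_t reg_rd(int reg);       /* stub; would be an MMIO read */

static bool prev_is_after_undi(void)
{
        /* DORQ must be out of reset before its registers are meaningful */
        if (!(reg_rd(0) & RST_DORQ_BIT))
                return false;

        /* UNDI marks its presence via the normal-bell CID offset */
        return reg_rd(1) == UNDI_CID_OFST;
}

static uint32_t reg_rd(int reg)
{
        return reg == 0 ? RST_DORQ_BIT : UNDI_CID_OFST;
}

int main(void)
{
        printf("%d\n", prev_is_after_undi());  /* prints 1 in this model */
        return 0;
}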
@@ -10302,6 +10320,10 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10302 10320
10303 BNX2X_DEV_INFO("Path is unmarked\n"); 10321 BNX2X_DEV_INFO("Path is unmarked\n");
10304 10322
10323 /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10324 if (bnx2x_prev_is_after_undi(bp))
10325 goto out;
10326
10305 /* If function has FLR capabilities, and existing FW version matches 10327 /* If function has FLR capabilities, and existing FW version matches
10306 * the one required, then FLR will be sufficient to clean any residue 10328 * the one required, then FLR will be sufficient to clean any residue
10307 * left by previous driver 10329 * left by previous driver
@@ -10322,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10322 10344
10323 BNX2X_DEV_INFO("Could not FLR\n"); 10345 BNX2X_DEV_INFO("Could not FLR\n");
10324 10346
10347out:
10325 /* Close the MCP request, return failure*/ 10348 /* Close the MCP request, return failure*/
10326 rc = bnx2x_prev_mcp_done(bp); 10349 rc = bnx2x_prev_mcp_done(bp);
10327 if (!rc) 10350 if (!rc)
@@ -10360,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10360 /* close LLH filters towards the BRB */ 10383 /* close LLH filters towards the BRB */
10361 bnx2x_set_rx_filter(&bp->link_params, 0); 10384 bnx2x_set_rx_filter(&bp->link_params, 0);
10362 10385
10363 /* Check if the UNDI driver was previously loaded 10386 /* Check if the UNDI driver was previously loaded */
10364 * UNDI driver initializes CID offset for normal bell to 0x7 10387 if (bnx2x_prev_is_after_undi(bp)) {
10365 */ 10388 prev_undi = true;
10366 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 10389 /* clear the UNDI indication */
10367 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 10390 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10368 if (tmp_reg == 0x7) { 10391 /* clear possible idle check errors */
10369 BNX2X_DEV_INFO("UNDI previously loaded\n"); 10392 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10370 prev_undi = true;
10371 /* clear the UNDI indication */
10372 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10373 /* clear possible idle check errors */
10374 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10375 }
10376 } 10393 }
10377 if (!CHIP_IS_E1x(bp)) 10394 if (!CHIP_IS_E1x(bp))
10378 /* block FW from writing to host */ 10395 /* block FW from writing to host */
@@ -13283,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13283 netdev_reset_tc(bp->dev); 13300 netdev_reset_tc(bp->dev);
13284 13301
13285 del_timer_sync(&bp->timer); 13302 del_timer_sync(&bp->timer);
13286 cancel_delayed_work(&bp->sp_task); 13303 cancel_delayed_work_sync(&bp->sp_task);
13287 cancel_delayed_work(&bp->period_task); 13304 cancel_delayed_work_sync(&bp->period_task);
13288 13305
13289 spin_lock_bh(&bp->stats_lock); 13306 spin_lock_bh(&bp->stats_lock);
13290 bp->stats_state = STATS_STATE_DISABLED; 13307 bp->stats_state = STATS_STATE_DISABLED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index d725317c4277..b1936044767a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -12,7 +12,7 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Vladislav Zolotarov 16 * Written by: Vladislav Zolotarov
17 * 17 *
18 */ 18 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 80f6c790ed88..718ecd294661 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -12,7 +12,7 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Vladislav Zolotarov 16 * Written by: Vladislav Zolotarov
17 * 17 *
18 */ 18 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index faf01488d26e..eda8583f6fc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -12,9 +12,9 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 16 * Written by: Shmulik Ravid
17 * Ariel Elior <ariele@broadcom.com> 17 * Ariel Elior <ariel.elior@qlogic.com>
18 * 18 *
19 */ 19 */
20#include "bnx2x.h" 20#include "bnx2x.h"
@@ -1071,8 +1071,10 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1071 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1071 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1072 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1072 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1073 1073
1074 /* set the VF doorbell threshold */ 1074 /* set the VF doorbell threshold. This threshold represents the amount
1075 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 1075 * of doorbells allowed in the main DORQ fifo for a specific VF.
1076 */
1077 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
1076} 1078}
1077 1079
1078void bnx2x_iov_init_dmae(struct bnx2x *bp) 1080void bnx2x_iov_init_dmae(struct bnx2x *bp)
@@ -2576,7 +2578,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2576 2578
2577 ivi->vf = vfidx; 2579 ivi->vf = vfidx;
2578 ivi->qos = 0; 2580 ivi->qos = 0;
2579 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 2581 ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2582 ivi->min_tx_rate = 0;
2580 ivi->spoofchk = 1; /*always enabled */ 2583 ivi->spoofchk = 1; /*always enabled */
2581 if (vf->state == VF_ENABLED) { 2584 if (vf->state == VF_ENABLED) {
2582 /* mac and vlan are in vlan_mac objects */ 2585 /* mac and vlan are in vlan_mac objects */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 6929adba52f9..96c575e147a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -12,9 +12,9 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 16 * Written by: Shmulik Ravid
17 * Ariel Elior <ariele@broadcom.com> 17 * Ariel Elior <ariel.elior@qlogic.com>
18 */ 18 */
19#ifndef BNX2X_SRIOV_H 19#ifndef BNX2X_SRIOV_H
20#define BNX2X_SRIOV_H 20#define BNX2X_SRIOV_H
@@ -571,7 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
571 return NULL; 571 return NULL;
572} 572}
573 573
574static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; } 574static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
575static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 575static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
576static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 576static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
577static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 577static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 3b75070411aa..ca47665f94bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index f35845006cdd..2beceaefdeea 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman 12 * UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 784c7155b98a..d712d0ddd719 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -12,9 +12,9 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 16 * Written by: Shmulik Ravid
17 * Ariel Elior <ariele@broadcom.com> 17 * Ariel Elior <ariel.elior@qlogic.com>
18 */ 18 */
19 19
20#include "bnx2x.h" 20#include "bnx2x.h"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index c922b81170e5..e21e706762c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -12,8 +12,8 @@
12 * license other than the GPL, without Broadcom's express prior written 12 * license other than the GPL, without Broadcom's express prior written
13 * consent. 13 * consent.
14 * 14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16 * Written by: Ariel Elior <ariele@broadcom.com> 16 * Written by: Ariel Elior <ariel.elior@qlogic.com>
17 */ 17 */
18#ifndef VF_PF_IF_H 18#ifndef VF_PF_IF_H
19#define VF_PF_IF_H 19#define VF_PF_IF_H
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4dd48d2fa804..8244e2b14bb4 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
608 pr_err("%s: Bad type %d\n", __func__, ulp_type); 608 pr_err("%s: Bad type %d\n", __func__, ulp_type);
609 return -EINVAL; 609 return -EINVAL;
610 } 610 }
611
612 if (ulp_type == CNIC_ULP_ISCSI)
613 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
614
611 mutex_lock(&cnic_lock); 615 mutex_lock(&cnic_lock);
612 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 616 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
613 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 617 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
620 } 624 }
621 mutex_unlock(&cnic_lock); 625 mutex_unlock(&cnic_lock);
622 626
623 if (ulp_type == CNIC_ULP_ISCSI) 627 if (ulp_type == CNIC_ULP_FCOE)
624 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
625 else if (ulp_type == CNIC_ULP_FCOE)
626 dev->fcoe_cap = NULL; 628 dev->fcoe_cap = NULL;
627 629
628 synchronize_rcu(); 630 synchronize_rcu();
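Note the ordering change in the hunk above: the ISCSI_KEVENT_IF_DOWN message now goes out before the ops pointer is torn down under the lock, not after. A tiny model of notify-then-unpublish (stand-in names, not the cnic API):

#include <stdio.h>

static void (*ulp_ops)(const char *);

static void iscsi_handler(const char *ev) { printf("iscsi: %s\n", ev); }

static void unregister_ulp(void)
{
        /* 1) send IF_DOWN while the consumer is still registered */
        if (ulp_ops)
                ulp_ops("IF_DOWN");

        /* 2) only then tear down the registration */
        ulp_ops = NULL;
}

int main(void)
{
        ulp_ops = iscsi_handler;
        unregister_ulp();
        return 0;
}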
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1039 struct cnic_local *cp = dev->cnic_priv; 1041 struct cnic_local *cp = dev->cnic_priv;
1040 struct cnic_uio_dev *udev; 1042 struct cnic_uio_dev *udev;
1041 1043
1042 read_lock(&cnic_dev_lock);
1043 list_for_each_entry(udev, &cnic_udev_list, list) { 1044 list_for_each_entry(udev, &cnic_udev_list, list) {
1044 if (udev->pdev == dev->pcidev) { 1045 if (udev->pdev == dev->pcidev) {
1045 udev->dev = dev; 1046 udev->dev = dev;
1046 if (__cnic_alloc_uio_rings(udev, pages)) { 1047 if (__cnic_alloc_uio_rings(udev, pages)) {
1047 udev->dev = NULL; 1048 udev->dev = NULL;
1048 read_unlock(&cnic_dev_lock);
1049 return -ENOMEM; 1049 return -ENOMEM;
1050 } 1050 }
1051 cp->udev = udev; 1051 cp->udev = udev;
1052 read_unlock(&cnic_dev_lock);
1053 return 0; 1052 return 0;
1054 } 1053 }
1055 } 1054 }
1056 read_unlock(&cnic_dev_lock);
1057 1055
1058 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); 1056 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1059 if (!udev) 1057 if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1067 if (__cnic_alloc_uio_rings(udev, pages)) 1065 if (__cnic_alloc_uio_rings(udev, pages))
1068 goto err_udev; 1066 goto err_udev;
1069 1067
1070 write_lock(&cnic_dev_lock);
1071 list_add(&udev->list, &cnic_udev_list); 1068 list_add(&udev->list, &cnic_udev_list);
1072 write_unlock(&cnic_dev_lock);
1073 1069
1074 pci_dev_get(udev->pdev); 1070 pci_dev_get(udev->pdev);
1075 1071
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5624{ 5620{
5625 int if_type; 5621 int if_type;
5626 5622
5627 rcu_read_lock();
5628 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5623 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5629 struct cnic_ulp_ops *ulp_ops; 5624 struct cnic_ulp_ops *ulp_ops;
5630 void *ctx; 5625 void *ctx;
5631 5626
5632 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5627 mutex_lock(&cnic_lock);
5633 if (!ulp_ops || !ulp_ops->indicate_netevent) 5628 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5629 lockdep_is_held(&cnic_lock));
5630 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5631 mutex_unlock(&cnic_lock);
5634 continue; 5632 continue;
5633 }
5635 5634
5636 ctx = cp->ulp_handle[if_type]; 5635 ctx = cp->ulp_handle[if_type];
5637 5636
5637 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5638 mutex_unlock(&cnic_lock);
5639
5638 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5640 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5641
5642 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5639 } 5643 }
5640 rcu_read_unlock();
5641} 5644}
5642 5645
5643/* netdev event handler */ 5646/* netdev event handler */
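cnic_rcv_netevent above trades rcu_read_lock() for a mutex plus a call-pending flag: the handler pointer is sampled under the mutex, the flag is raised, and the mutex is dropped before the callback runs, so the callback is free to sleep. A user-space model of that locking shape (pthread stand-ins for the kernel mutex and set_bit/clear_bit; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*handler)(int event);
static int call_pending;       /* a real unregister path would wait on this */

static void dispatch(int event)
{
        void (*h)(int);

        pthread_mutex_lock(&ops_lock);
        h = handler;                   /* sample the pointer under the lock */
        if (!h) {
                pthread_mutex_unlock(&ops_lock);
                return;
        }
        call_pending = 1;              /* models set_bit(ULP_F_CALL_PENDING) */
        pthread_mutex_unlock(&ops_lock);

        h(event);                      /* may block; lock is not held */

        call_pending = 0;              /* models clear_bit() */
}

static void demo(int event) { printf("event %d\n", event); }

int main(void)
{
        handler = demo;
        dispatch(5);
        printf("pending=%d\n", call_pending);  /* 0 again */
        return 0;
}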
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0966bd04375f..5ba1cfbd60da 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
2481 dev_set_drvdata(&pdev->dev, dev); 2481 dev_set_drvdata(&pdev->dev, dev);
2482 ether_addr_copy(dev->dev_addr, macaddr); 2482 ether_addr_copy(dev->dev_addr, macaddr);
2483 dev->watchdog_timeo = 2 * HZ; 2483 dev->watchdog_timeo = 2 * HZ;
2484 SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops); 2484 dev->ethtool_ops = &bcmgenet_ethtool_ops;
2485 dev->netdev_ops = &bcmgenet_netdev_ops; 2485 dev->netdev_ops = &bcmgenet_netdev_ops;
2486 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); 2486 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
2487 2487
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4608673beaff..add8d8596084 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
298static int bcmgenet_mii_probe(struct net_device *dev) 298static int bcmgenet_mii_probe(struct net_device *dev)
299{ 299{
300 struct bcmgenet_priv *priv = netdev_priv(dev); 300 struct bcmgenet_priv *priv = netdev_priv(dev);
301 struct device_node *dn = priv->pdev->dev.of_node;
301 struct phy_device *phydev; 302 struct phy_device *phydev;
302 unsigned int phy_flags; 303 unsigned int phy_flags;
303 int ret; 304 int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
307 return 0; 308 return 0;
308 } 309 }
309 310
310 if (priv->phy_dn) 311 /* In the case of a fixed PHY, the DT node associated
311 phydev = of_phy_connect(dev, priv->phy_dn, 312 * to the PHY is the Ethernet MAC DT node.
312 bcmgenet_mii_setup, 0, 313 */
313 priv->phy_interface); 314 if (of_phy_is_fixed_link(dn)) {
314 else 315 ret = of_phy_register_fixed_link(dn);
315 phydev = of_phy_connect_fixed_link(dev, 316 if (ret)
316 bcmgenet_mii_setup, 317 return ret;
317 priv->phy_interface); 318
319 priv->phy_dn = dn;
320 }
318 321
322 phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
323 priv->phy_interface);
319 if (!phydev) { 324 if (!phydev) {
320 pr_err("could not attach to PHY\n"); 325 pr_err("could not attach to PHY\n");
321 return -ENODEV; 326 return -ENODEV;
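The bcmgenet probe path above adopts the generic fixed-link helpers: when the MAC's DT node describes a fixed link, it is registered as a fixed PHY and the MAC node itself becomes the PHY node handed to of_phy_connect(). A much-simplified user-space model of that control flow; all three OF helpers are stubbed (the real ones live in the kernel's OF/MDIO code), and the non-fixed-link lookup is truncated here:

#include <stdio.h>

struct device_node { int fixed_link; };
struct phy_device { int dummy; };

static int of_phy_is_fixed_link(struct device_node *dn)
{
        return dn->fixed_link;
}

static int of_phy_register_fixed_link(struct device_node *dn)
{
        return dn ? 0 : -1;            /* pretend registration succeeded */
}

static struct phy_device *of_phy_connect(struct device_node *dn)
{
        static struct phy_device phy;
        return dn ? &phy : NULL;
}

static int mii_probe(struct device_node *mac_node)
{
        struct device_node *phy_dn = NULL;

        /* for a fixed PHY the MAC node doubles as the PHY node */
        if (of_phy_is_fixed_link(mac_node)) {
                if (of_phy_register_fixed_link(mac_node))
                        return -1;
                phy_dn = mac_node;
        }

        return of_phy_connect(phy_dn) ? 0 : -1;
}

int main(void)
{
        struct device_node dn = { .fixed_link = 1 };
        printf("%d\n", mii_probe(&dn));        /* 0 on success */
        return 0;
}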
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e5d95c5ce1ad..df2792d8383d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation. 7 * Copyright (C) 2005-2014 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
94 94
95#define DRV_MODULE_NAME "tg3" 95#define DRV_MODULE_NAME "tg3"
96#define TG3_MAJ_NUM 3 96#define TG3_MAJ_NUM 3
97#define TG3_MIN_NUM 136 97#define TG3_MIN_NUM 137
98#define DRV_MODULE_VERSION \ 98#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE "Jan 03, 2014" 100#define DRV_MODULE_RELDATE "May 11, 2014"
101 101
102#define RESET_KIND_SHUTDOWN 0 102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1 103#define RESET_KIND_INIT 1
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3224 return 0; 3224 return 0;
3225} 3225}
3226 3226
3227#define NVRAM_CMD_TIMEOUT 10000 3227#define NVRAM_CMD_TIMEOUT 100
3228 3228
3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230{ 3230{
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
7871 return NETDEV_TX_OK; 7871 return NETDEV_TX_OK;
7872} 7872}
7873 7873
7874/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 7874/* hard_start_xmit for all devices */
7875 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7876 */
7877static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7875static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7878{ 7876{
7879 struct tg3 *tp = netdev_priv(dev); 7877 struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7884 struct tg3_napi *tnapi; 7882 struct tg3_napi *tnapi;
7885 struct netdev_queue *txq; 7883 struct netdev_queue *txq;
7886 unsigned int last; 7884 unsigned int last;
7885 struct iphdr *iph = NULL;
7886 struct tcphdr *tcph = NULL;
7887 __sum16 tcp_csum = 0, ip_csum = 0;
7888 __be16 ip_tot_len = 0;
7887 7889
7888 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7890 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7889 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7891 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7915 7917
7916 mss = skb_shinfo(skb)->gso_size; 7918 mss = skb_shinfo(skb)->gso_size;
7917 if (mss) { 7919 if (mss) {
7918 struct iphdr *iph;
7919 u32 tcp_opt_len, hdr_len; 7920 u32 tcp_opt_len, hdr_len;
7920 7921
7921 if (skb_cow_head(skb, 0)) 7922 if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7927 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7928 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7928 7929
7929 if (!skb_is_gso_v6(skb)) { 7930 if (!skb_is_gso_v6(skb)) {
7931 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7932 tg3_flag(tp, TSO_BUG))
7933 return tg3_tso_bug(tp, skb);
7934
7935 ip_csum = iph->check;
7936 ip_tot_len = iph->tot_len;
7930 iph->check = 0; 7937 iph->check = 0;
7931 iph->tot_len = htons(mss + hdr_len); 7938 iph->tot_len = htons(mss + hdr_len);
7932 } 7939 }
7933 7940
7934 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7935 tg3_flag(tp, TSO_BUG))
7936 return tg3_tso_bug(tp, skb);
7937
7938 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7941 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7939 TXD_FLAG_CPU_POST_DMA); 7942 TXD_FLAG_CPU_POST_DMA);
7940 7943
7944 tcph = tcp_hdr(skb);
7945 tcp_csum = tcph->check;
7946
7941 if (tg3_flag(tp, HW_TSO_1) || 7947 if (tg3_flag(tp, HW_TSO_1) ||
7942 tg3_flag(tp, HW_TSO_2) || 7948 tg3_flag(tp, HW_TSO_2) ||
7943 tg3_flag(tp, HW_TSO_3)) { 7949 tg3_flag(tp, HW_TSO_3)) {
7944 tcp_hdr(skb)->check = 0; 7950 tcph->check = 0;
7945 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7951 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7946 } else 7952 } else {
7947 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 7953 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7948 iph->daddr, 0, 7954 0, IPPROTO_TCP, 0);
7949 IPPROTO_TCP, 7955 }
7950 0);
7951 7956
7952 if (tg3_flag(tp, HW_TSO_3)) { 7957 if (tg3_flag(tp, HW_TSO_3)) {
7953 mss |= (hdr_len & 0xc) << 12; 7958 mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8047 if (would_hit_hwbug) { 8052 if (would_hit_hwbug) {
8048 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8053 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8049 8054
8055 if (mss) {
8056 /* If it's a TSO packet, do GSO instead of
8057 * allocating and copying to a large linear SKB
8058 */
8059 if (ip_tot_len) {
8060 iph->check = ip_csum;
8061 iph->tot_len = ip_tot_len;
8062 }
8063 tcph->check = tcp_csum;
8064 return tg3_tso_bug(tp, skb);
8065 }
8066
8050 /* If the workaround fails due to memory/mapping 8067 /* If the workaround fails due to memory/mapping
8051 * failure, silently drop this packet. 8068 * failure, silently drop this packet.
8052 */ 8069 */
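The tg3 hunk above works because the TSO rewrite is now undoable: iph->check, iph->tot_len and tcph->check are stashed before being rewritten for hardware TSO, and if the descriptor-bug check later forces a software-GSO fallback the saved values go back so the stack sees an unmodified packet. A minimal model of the stash-and-undo, with stand-in types:

#include <stdint.h>
#include <stdio.h>

struct hdrs { uint16_t ip_check, ip_tot_len, tcp_check; };

static void hw_tso_rewrite(struct hdrs *h)
{
        h->ip_check = 0;               /* hardware recomputes these */
        h->ip_tot_len = 0;
        h->tcp_check = 0;
}

int main(void)
{
        struct hdrs h = { 0x1234, 1500, 0x5678 };
        struct hdrs saved = h;         /* stash before rewriting */

        hw_tso_rewrite(&h);

        /* ...hwbug detected: undo, then hand the packet to software GSO */
        h = saved;
        printf("0x%04x %u 0x%04x\n", (unsigned)h.ip_check,
               (unsigned)h.ip_tot_len, (unsigned)h.tcp_check);
        return 0;
}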
@@ -11876,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)
11876static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11893static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11877{ 11894{
11878 struct tg3 *tp = netdev_priv(dev); 11895 struct tg3 *tp = netdev_priv(dev);
11879 int ret; 11896 int ret, cpmu_restore = 0;
11880 u8 *pd; 11897 u8 *pd;
11881 u32 i, offset, len, b_offset, b_count; 11898 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11882 __be32 val; 11899 __be32 val;
11883 11900
11884 if (tg3_flag(tp, NO_NVRAM)) 11901 if (tg3_flag(tp, NO_NVRAM))
@@ -11890,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11890 11907
11891 eeprom->magic = TG3_EEPROM_MAGIC; 11908 eeprom->magic = TG3_EEPROM_MAGIC;
11892 11909
11910 /* Override clock, link aware and link idle modes */
11911 if (tg3_flag(tp, CPMU_PRESENT)) {
11912 cpmu_val = tr32(TG3_CPMU_CTRL);
11913 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11914 CPMU_CTRL_LINK_IDLE_MODE)) {
11915 tw32(TG3_CPMU_CTRL, cpmu_val &
11916 ~(CPMU_CTRL_LINK_AWARE_MODE |
11917 CPMU_CTRL_LINK_IDLE_MODE));
11918 cpmu_restore = 1;
11919 }
11920 }
11921 tg3_override_clk(tp);
11922
11893 if (offset & 3) { 11923 if (offset & 3) {
11894 /* adjustments to start on required 4 byte boundary */ 11924 /* adjustments to start on required 4 byte boundary */
11895 b_offset = offset & 3; 11925 b_offset = offset & 3;
@@ -11900,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11900 } 11930 }
11901 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 11931 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11902 if (ret) 11932 if (ret)
11903 return ret; 11933 goto eeprom_done;
11904 memcpy(data, ((char *)&val) + b_offset, b_count); 11934 memcpy(data, ((char *)&val) + b_offset, b_count);
11905 len -= b_count; 11935 len -= b_count;
11906 offset += b_count; 11936 offset += b_count;
@@ -11912,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11912 for (i = 0; i < (len - (len & 3)); i += 4) { 11942 for (i = 0; i < (len - (len & 3)); i += 4) {
11913 ret = tg3_nvram_read_be32(tp, offset + i, &val); 11943 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11914 if (ret) { 11944 if (ret) {
11945 if (i)
11946 i -= 4;
11915 eeprom->len += i; 11947 eeprom->len += i;
11916 return ret; 11948 goto eeprom_done;
11917 } 11949 }
11918 memcpy(pd + i, &val, 4); 11950 memcpy(pd + i, &val, 4);
11951 if (need_resched()) {
11952 if (signal_pending(current)) {
11953 eeprom->len += i;
11954 ret = -EINTR;
11955 goto eeprom_done;
11956 }
11957 cond_resched();
11958 }
11919 } 11959 }
11920 eeprom->len += i; 11960 eeprom->len += i;
11921 11961
@@ -11926,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11926 b_offset = offset + len - b_count; 11966 b_offset = offset + len - b_count;
11927 ret = tg3_nvram_read_be32(tp, b_offset, &val); 11967 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11928 if (ret) 11968 if (ret)
11929 return ret; 11969 goto eeprom_done;
11930 memcpy(pd, &val, b_count); 11970 memcpy(pd, &val, b_count);
11931 eeprom->len += b_count; 11971 eeprom->len += b_count;
11932 } 11972 }
11933 return 0; 11973 ret = 0;
11974
11975eeprom_done:
11976 /* Restore clock, link aware and link idle modes */
11977 tg3_restore_clk(tp);
11978 if (cpmu_restore)
11979 tw32(TG3_CPMU_CTRL, cpmu_val);
11980
11981 return ret;
11934} 11982}
11935 11983
11936static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11984static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
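tg3_get_eeprom's reworked loop is a cooperative long-read pattern: yield the CPU between word reads, bail out on a pending signal with the partial length reported, and funnel every exit through one label so the clock/CPMU overrides are always restored. A user-space sketch of that shape, with stubs standing in for cond_resched(), signal_pending() and the NVRAM read:

#include <sched.h>
#include <stdio.h>

static int fake_signal_pending(void)
{
        return 0;                      /* stands in for signal_pending() */
}

static int read_word(int i, unsigned int *val)
{
        *val = (unsigned int)i;        /* stub for the 4-byte NVRAM read */
        return 0;
}

static int read_all(unsigned int *buf, int n, int *done)
{
        int i, ret = 0;

        /* ...clock/link-power overrides would be applied here... */
        for (i = 0; i < n; i++) {
                ret = read_word(i, &buf[i]);
                if (ret) {
                        *done = i;     /* report the partial length */
                        goto out;
                }
                if (fake_signal_pending()) {
                        *done = i;
                        ret = -1;      /* stands in for -EINTR */
                        goto out;
                }
                sched_yield();         /* stands in for cond_resched() */
        }
        *done = n;
out:
        /* single exit: overrides get restored on success and on error */
        return ret;
}

int main(void)
{
        unsigned int buf[8];
        int done;

        printf("%d %d\n", read_all(buf, 8, &done), done);      /* 0 8 */
        return 0;
}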
@@ -12484,7 +12532,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12484 return size; 12532 return size;
12485} 12533}
12486 12534
12487static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) 12535static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
12488{ 12536{
12489 struct tg3 *tp = netdev_priv(dev); 12537 struct tg3 *tp = netdev_priv(dev);
12490 int i; 12538 int i;
@@ -12495,7 +12543,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12495 return 0; 12543 return 0;
12496} 12544}
12497 12545
12498static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) 12546static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
12499{ 12547{
12500 struct tg3 *tp = netdev_priv(dev); 12548 struct tg3 *tp = netdev_priv(dev);
12501 size_t i; 12549 size_t i;
@@ -14027,8 +14075,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
14027 .get_sset_count = tg3_get_sset_count, 14075 .get_sset_count = tg3_get_sset_count,
14028 .get_rxnfc = tg3_get_rxnfc, 14076 .get_rxnfc = tg3_get_rxnfc,
14029 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14077 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14030 .get_rxfh_indir = tg3_get_rxfh_indir, 14078 .get_rxfh = tg3_get_rxfh,
14031 .set_rxfh_indir = tg3_set_rxfh_indir, 14079 .set_rxfh = tg3_set_rxfh,
14032 .get_channels = tg3_get_channels, 14080 .get_channels = tg3_get_channels,
14033 .set_channels = tg3_set_channels, 14081 .set_channels = tg3_set_channels,
14034 .get_ts_info = tg3_get_ts_info, 14082 .get_ts_info = tg3_get_ts_info,
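The RSS hooks are renamed because the ethtool core in this merge window folded the indirection table and the hash key into a single get_rxfh/set_rxfh pair; drivers with no keyed hash, like this one, simply ignore the key argument. A sketch of that callback shape (plain functions, not the real ethtool_ops signatures):

#include <stdint.h>
#include <stdio.h>

#define RSS_TBL_SIZE 4

static uint32_t rss_tbl[RSS_TBL_SIZE];

static int get_rxfh(uint32_t *indir, uint8_t *key)
{
        int i;

        (void)key;     /* no hash key on this model of hardware */
        for (i = 0; i < RSS_TBL_SIZE; i++)
                indir[i] = rss_tbl[i];
        return 0;
}

static int set_rxfh(const uint32_t *indir, const uint8_t *key)
{
        int i;

        (void)key;
        for (i = 0; i < RSS_TBL_SIZE; i++)
                rss_tbl[i] = indir[i];
        return 0;
}

int main(void)
{
        uint32_t in[RSS_TBL_SIZE] = { 0, 1, 2, 3 }, out[RSS_TBL_SIZE];

        set_rxfh(in, NULL);
        get_rxfh(out, NULL);
        printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);
        return 0;
}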
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 04321e5a356e..461accaf0aa4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2013 Broadcom Corporation. 7 * Copyright (C) 2007-2014 Broadcom Corporation.
8 */ 8 */
9 9
10#ifndef _T3_H 10#ifndef _T3_H
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index f9e150825bb5..882cad71ad62 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -266,8 +266,8 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
266 ethtool_cmd_speed_set(cmd, SPEED_10000); 266 ethtool_cmd_speed_set(cmd, SPEED_10000);
267 cmd->duplex = DUPLEX_FULL; 267 cmd->duplex = DUPLEX_FULL;
268 } else { 268 } else {
269 ethtool_cmd_speed_set(cmd, -1); 269 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
270 cmd->duplex = -1; 270 cmd->duplex = DUPLEX_UNKNOWN;
271 } 271 }
272 cmd->transceiver = XCVR_EXTERNAL; 272 cmd->transceiver = XCVR_EXTERNAL;
273 cmd->maxtxpkt = 0; 273 cmd->maxtxpkt = 0;
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
1137void 1137void
1138bnad_set_ethtool_ops(struct net_device *netdev) 1138bnad_set_ethtool_ops(struct net_device *netdev)
1139{ 1139{
1140 SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops); 1140 netdev->ethtool_ops = &bnad_ethtool_ops;
1141} 1141}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 521dfea44b83..25d6b2a10e4e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
1737 platform_set_drvdata(pdev, ndev); 1737 platform_set_drvdata(pdev, ndev);
1738 ether_setup(ndev); 1738 ether_setup(ndev);
1739 ndev->netdev_ops = &xgmac_netdev_ops; 1739 ndev->netdev_ops = &xgmac_netdev_ops;
1740 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); 1740 ndev->ethtool_ops = &xgmac_ethtool_ops;
1741 spin_lock_init(&priv->stats_lock); 1741 spin_lock_init(&priv->stats_lock);
1742 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); 1742 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
1743 1743
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 05613a85ce61..186566bfdbc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -580,8 +580,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
580 ethtool_cmd_speed_set(cmd, p->link_config.speed); 580 ethtool_cmd_speed_set(cmd, p->link_config.speed);
581 cmd->duplex = p->link_config.duplex; 581 cmd->duplex = p->link_config.duplex;
582 } else { 582 } else {
583 ethtool_cmd_speed_set(cmd, -1); 583 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
584 cmd->duplex = -1; 584 cmd->duplex = DUPLEX_UNKNOWN;
585 } 585 }
586 586
587 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 587 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1100 1100
1101 netif_napi_add(netdev, &adapter->napi, t1_poll, 64); 1101 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1102 1102
1103 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); 1103 netdev->ethtool_ops = &t1_ethtool_ops;
1104 } 1104 }
1105 1105
1106 if (t1_init_sw_modules(adapter, bi) < 0) { 1106 if (t1_init_sw_modules(adapter, bi) < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 07bbb711b7e5..5d9cce053cc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1809,8 +1809,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1809 ethtool_cmd_speed_set(cmd, p->link_config.speed); 1809 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1810 cmd->duplex = p->link_config.duplex; 1810 cmd->duplex = p->link_config.duplex;
1811 } else { 1811 } else {
1812 ethtool_cmd_speed_set(cmd, -1); 1812 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1813 cmd->duplex = -1; 1813 cmd->duplex = DUPLEX_UNKNOWN;
1814 } 1814 }
1815 1815
1816 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 1816 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3291 netdev->features |= NETIF_F_HIGHDMA; 3291 netdev->features |= NETIF_F_HIGHDMA;
3292 3292
3293 netdev->netdev_ops = &cxgb_netdev_ops; 3293 netdev->netdev_ops = &cxgb_netdev_ops;
3294 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 3294 netdev->ethtool_ops = &cxgb_ethtool_ops;
3295 } 3295 }
3296 3296
3297 pci_set_drvdata(pdev, adapter); 3297 pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index c0a9dd55f4e5..b0cbb2b7fd48 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
185 if (ether_addr_equal(dev->dev_addr, mac)) { 185 if (ether_addr_equal(dev->dev_addr, mac)) {
186 rcu_read_lock(); 186 rcu_read_lock();
187 if (vlan && vlan != VLAN_VID_MASK) { 187 if (vlan && vlan != VLAN_VID_MASK) {
188 dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan); 188 dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
189 } else if (netif_is_bond_slave(dev)) { 189 } else if (netif_is_bond_slave(dev)) {
190 struct net_device *upper_dev; 190 struct net_device *upper_dev;
191 191
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db37709263..f503dce4ab17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ 357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ 358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ 359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
360 MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
361 MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
360}; 362};
361 363
362enum { 364enum {
363 MAX_EGRQ = 128, /* max # of egress queues, including FLs */ 365 INGQ_EXTRAS = 2, /* firmware event queue and */
364 MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ 366 /* forwarded interrupts */
367 MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
368 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
369 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
370 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
365}; 371};
366 372
367struct adapter; 373struct adapter;
@@ -538,6 +544,7 @@ struct sge {
538 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; 544 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
539 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; 545 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
540 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; 546 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
547 struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
541 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; 548 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
542 549
543 struct sge_rspq intrq ____cacheline_aligned_in_smp; 550 struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
548 u16 ethtxq_rover; /* Tx queue to clean up next */ 555 u16 ethtxq_rover; /* Tx queue to clean up next */
549 u16 ofldqsets; /* # of active offload queue sets */ 556 u16 ofldqsets; /* # of active offload queue sets */
550 u16 rdmaqs; /* # of available RDMA Rx queues */ 557 u16 rdmaqs; /* # of available RDMA Rx queues */
558 u16 rdmaciqs; /* # of available RDMA concentrator IQs */
551 u16 ofld_rxq[MAX_OFLD_QSETS]; 559 u16 ofld_rxq[MAX_OFLD_QSETS];
552 u16 rdma_rxq[NCHAN]; 560 u16 rdma_rxq[NCHAN];
561 u16 rdma_ciq[NCHAN];
553 u16 timer_val[SGE_NTIMERS]; 562 u16 timer_val[SGE_NTIMERS];
554 u8 counter_val[SGE_NCOUNTERS]; 563 u8 counter_val[SGE_NCOUNTERS];
555 u32 fl_pg_order; /* large page allocation size */ 564 u32 fl_pg_order; /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
577#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) 586#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
578#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) 587#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
579#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) 588#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
589#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
580 590
581struct l2t_data; 591struct l2t_data;
582 592
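The new MAX_EGRQ/MAX_INGQ values are no longer magic numbers but sums over the per-class maxima. The arithmetic as a compile-time check, assuming NCHAN = 4 and MAX_ETH_QSETS = 32 (both defined earlier in this header but not visible in the hunk, so treat them as assumptions):

#include <stdio.h>

enum {
        NCHAN            = 4,          /* assumed */
        MAX_ETH_QSETS    = 32,         /* assumed */
        MAX_OFLD_QSETS   = 16,
        MAX_CTRL_QUEUES  = NCHAN,
        MAX_RDMA_QUEUES  = NCHAN,
        MAX_RDMA_CIQS    = NCHAN,
        MAX_ISCSI_QUEUES = NCHAN,
        INGQ_EXTRAS      = 2,

        MAX_EGRQ = MAX_ETH_QSETS * 2 + MAX_OFLD_QSETS * 2
                 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
        MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
                 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};

int main(void)
{
        /* 108 and 62 with the assumed values above */
        printf("MAX_EGRQ=%d MAX_INGQ=%d\n", MAX_EGRQ, MAX_INGQ);
        return 0;
}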
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3301e0..2f8d6b910383 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
818 for_each_rdmarxq(&adap->sge, i) 818 for_each_rdmarxq(&adap->sge, i)
819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", 819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
820 adap->port[0]->name, i); 820 adap->port[0]->name, i);
821
822 for_each_rdmaciq(&adap->sge, i)
823 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
824 adap->port[0]->name, i);
821} 825}
822 826
823static int request_msix_queue_irqs(struct adapter *adap) 827static int request_msix_queue_irqs(struct adapter *adap)
824{ 828{
825 struct sge *s = &adap->sge; 829 struct sge *s = &adap->sge;
826 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2; 830 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
831 int msi_index = 2;
827 832
828 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 833 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
829 adap->msix_info[1].desc, &s->fw_evtq); 834 adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
857 goto unwind; 862 goto unwind;
858 msi_index++; 863 msi_index++;
859 } 864 }
865 for_each_rdmaciq(s, rdmaciqqidx) {
866 err = request_irq(adap->msix_info[msi_index].vec,
867 t4_sge_intr_msix, 0,
868 adap->msix_info[msi_index].desc,
869 &s->rdmaciq[rdmaciqqidx].rspq);
870 if (err)
871 goto unwind;
872 msi_index++;
873 }
860 return 0; 874 return 0;
861 875
862unwind: 876unwind:
877 while (--rdmaciqqidx >= 0)
878 free_irq(adap->msix_info[--msi_index].vec,
879 &s->rdmaciq[rdmaciqqidx].rspq);
863 while (--rdmaqidx >= 0) 880 while (--rdmaqidx >= 0)
864 free_irq(adap->msix_info[--msi_index].vec, 881 free_irq(adap->msix_info[--msi_index].vec,
865 &s->rdmarxq[rdmaqidx].rspq); 882 &s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
885 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); 902 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
886 for_each_rdmarxq(s, i) 903 for_each_rdmarxq(s, i)
887 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); 904 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
905 for_each_rdmaciq(s, i)
906 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
888} 907}
889 908
890/** 909/**
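The IRQ-request hunk extends the driver's unwind idiom: each queue class acquired in order gets a matching reverse-order release loop, so a failure part-way through frees exactly the vectors already taken. The idiom in isolation:

#include <stdio.h>

static int take(int i)  { return i == 3 ? -1 : 0; }    /* fail on the 4th */
static void drop(int i) { printf("drop %d\n", i); }

static int take_all(int n)
{
        int i, err = 0;

        for (i = 0; i < n; i++) {
                err = take(i);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0)       /* reverse order: last taken, first dropped */
                drop(i);
        return err;
}

int main(void)
{
        return take_all(5) ? 1 : 0;    /* drops 2, 1, 0 and then fails */
}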
@@ -1047,7 +1066,8 @@ freeout: t4_free_sge_resources(adap);
1047 if (msi_idx > 0) 1066 if (msi_idx > 0)
1048 msi_idx++; 1067 msi_idx++;
1049 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, 1068 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1050 &q->fl, uldrx_handler); 1069 q->fl.size ? &q->fl : NULL,
1070 uldrx_handler);
1051 if (err) 1071 if (err)
1052 goto freeout; 1072 goto freeout;
1053 memset(&q->stats, 0, sizeof(q->stats)); 1073 memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout: t4_free_sge_resources(adap);
1064 if (msi_idx > 0) 1084 if (msi_idx > 0)
1065 msi_idx++; 1085 msi_idx++;
1066 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], 1086 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1067 msi_idx, &q->fl, uldrx_handler); 1087 msi_idx, q->fl.size ? &q->fl : NULL,
1088 uldrx_handler);
1068 if (err) 1089 if (err)
1069 goto freeout; 1090 goto freeout;
1070 memset(&q->stats, 0, sizeof(q->stats)); 1091 memset(&q->stats, 0, sizeof(q->stats));
1071 s->rdma_rxq[i] = q->rspq.abs_id; 1092 s->rdma_rxq[i] = q->rspq.abs_id;
1072 } 1093 }
1073 1094
1095 for_each_rdmaciq(s, i) {
1096 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1097
1098 if (msi_idx > 0)
1099 msi_idx++;
1100 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1101 msi_idx, q->fl.size ? &q->fl : NULL,
1102 uldrx_handler);
1103 if (err)
1104 goto freeout;
1105 memset(&q->stats, 0, sizeof(q->stats));
1106 s->rdma_ciq[i] = q->rspq.abs_id;
1107 }
1108
1074 for_each_port(adap, i) { 1109 for_each_port(adap, i) {
1075 /* 1110 /*
1076 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't 1111 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2252,12 +2287,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2252 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || 2287 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2253 p->port_type == FW_PORT_TYPE_FIBER_XAUI) 2288 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2254 cmd->port = PORT_FIBRE; 2289 cmd->port = PORT_FIBRE;
2255 else if (p->port_type == FW_PORT_TYPE_SFP) { 2290 else if (p->port_type == FW_PORT_TYPE_SFP ||
2256 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || 2291 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2257 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) 2292 p->port_type == FW_PORT_TYPE_QSFP) {
2293 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2294 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2295 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2296 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2297 cmd->port = PORT_FIBRE;
2298 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2299 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2258 cmd->port = PORT_DA; 2300 cmd->port = PORT_DA;
2259 else 2301 else
2260 cmd->port = PORT_FIBRE; 2302 cmd->port = PORT_OTHER;
2261 } else 2303 } else
2262 cmd->port = PORT_OTHER; 2304 cmd->port = PORT_OTHER;
2263 2305
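The get_settings() change above stops reporting every SFP/QSFP cage as fibre: optical module types (LR/SR/ER/LRM) map to PORT_FIBRE, twinax to PORT_DA, and anything unrecognized to PORT_OTHER. The mapping as a standalone helper (enum values are stand-ins for the FW_PORT_MOD_TYPE_* and PORT_* constants):

#include <stdio.h>

enum mod_type { MOD_LR, MOD_SR, MOD_ER, MOD_LRM, MOD_TWINAX, MOD_UNKNOWN };
enum port_kind { PORT_FIBRE, PORT_DA, PORT_OTHER };

static enum port_kind port_from_module(enum mod_type mod)
{
        switch (mod) {
        case MOD_LR:
        case MOD_SR:
        case MOD_ER:
        case MOD_LRM:
                return PORT_FIBRE;     /* optical modules */
        case MOD_TWINAX:
                return PORT_DA;        /* direct-attach copper */
        default:
                return PORT_OTHER;     /* unknown or absent module */
        }
}

int main(void)
{
        printf("%d %d\n", port_from_module(MOD_SR),
               port_from_module(MOD_TWINAX));  /* 0 1 */
        return 0;
}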
@@ -2461,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
2461} 2503}
2462 2504
2463/** 2505/**
2464 * set_rxq_intr_params - set a queue's interrupt holdoff parameters 2506 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2465 * @adap: the adapter
2466 * @q: the Rx queue 2507 * @q: the Rx queue
2467 * @us: the hold-off time in us, or 0 to disable timer 2508 * @us: the hold-off time in us, or 0 to disable timer
2468 * @cnt: the hold-off packet count, or 0 to disable counter 2509 * @cnt: the hold-off packet count, or 0 to disable counter
@@ -2470,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
2470 * Sets an Rx queue's interrupt hold-off time and packet count. At least 2511 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2471 * one of the two needs to be enabled for the queue to generate interrupts. 2512 * one of the two needs to be enabled for the queue to generate interrupts.
2472 */ 2513 */
2473static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, 2514static int set_rspq_intr_params(struct sge_rspq *q,
2474 unsigned int us, unsigned int cnt) 2515 unsigned int us, unsigned int cnt)
2475{ 2516{
2517 struct adapter *adap = q->adap;
2518
2476 if ((us | cnt) == 0) 2519 if ((us | cnt) == 0)
2477 cnt = 1; 2520 cnt = 1;
2478 2521
@@ -2499,24 +2542,34 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2499 return 0; 2542 return 0;
2500} 2543}
2501 2544
2502static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2545/**
 2546 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2547 * @dev: the network device
2548 * @us: the hold-off time in us, or 0 to disable timer
2549 * @cnt: the hold-off packet count, or 0 to disable counter
2550 *
2551 * Set the RX interrupt hold-off parameters for a network device.
2552 */
2553static int set_rx_intr_params(struct net_device *dev,
2554 unsigned int us, unsigned int cnt)
2503{ 2555{
2504 const struct port_info *pi = netdev_priv(dev); 2556 int i, err;
2557 struct port_info *pi = netdev_priv(dev);
2505 struct adapter *adap = pi->adapter; 2558 struct adapter *adap = pi->adapter;
2506 struct sge_rspq *q; 2559 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2507 int i; 2560
2508 int r = 0; 2561 for (i = 0; i < pi->nqsets; i++, q++) {
2509 2562 err = set_rspq_intr_params(&q->rspq, us, cnt);
2510 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) { 2563 if (err)
2511 q = &adap->sge.ethrxq[i].rspq; 2564 return err;
2512 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2513 c->rx_max_coalesced_frames);
2514 if (r) {
2515 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2516 break;
2517 }
2518 } 2565 }
2519 return r; 2566 return 0;
2567}
2568
2569static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2570{
2571 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2572 c->rx_max_coalesced_frames);
2520} 2573}
2521 2574
2522static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2575static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
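set_coalesce() is rebuilt above as two layers: a per-response-queue setter and a per-device loop over the port's queue sets, which leaves the ethtool entry point a one-liner. A sketch of the split (stand-in types, not the cxgb4 functions):

#include <stdio.h>

struct rspq { unsigned int us, cnt; };

static int set_rspq_params(struct rspq *q, unsigned int us, unsigned int cnt)
{
        if ((us | cnt) == 0)
                cnt = 1;       /* at least one trigger must stay enabled */
        q->us = us;
        q->cnt = cnt;
        return 0;
}

static int set_rx_params(struct rspq *qs, int nq, unsigned int us,
                         unsigned int cnt)
{
        int i, err;

        for (i = 0; i < nq; i++) {
                err = set_rspq_params(&qs[i], us, cnt);
                if (err)
                        return err;    /* fail fast, like the driver */
        }
        return 0;
}

int main(void)
{
        struct rspq qs[4];

        printf("%d\n", set_rx_params(qs, 4, 0, 0));    /* 0; cnt forced to 1 */
        return 0;
}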
@@ -2732,7 +2785,7 @@ static u32 get_rss_table_size(struct net_device *dev)
2732 return pi->rss_size; 2785 return pi->rss_size;
2733} 2786}
2734 2787
2735static int get_rss_table(struct net_device *dev, u32 *p) 2788static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
2736{ 2789{
2737 const struct port_info *pi = netdev_priv(dev); 2790 const struct port_info *pi = netdev_priv(dev);
2738 unsigned int n = pi->rss_size; 2791 unsigned int n = pi->rss_size;
@@ -2742,7 +2795,7 @@ static int get_rss_table(struct net_device *dev, u32 *p)
2742 return 0; 2795 return 0;
2743} 2796}
2744 2797
2745static int set_rss_table(struct net_device *dev, const u32 *p) 2798static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
2746{ 2799{
2747 unsigned int i; 2800 unsigned int i;
2748 struct port_info *pi = netdev_priv(dev); 2801 struct port_info *pi = netdev_priv(dev);
@@ -2844,8 +2897,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
2844 .set_wol = set_wol, 2897 .set_wol = set_wol,
2845 .get_rxnfc = get_rxnfc, 2898 .get_rxnfc = get_rxnfc,
2846 .get_rxfh_indir_size = get_rss_table_size, 2899 .get_rxfh_indir_size = get_rss_table_size,
2847 .get_rxfh_indir = get_rss_table, 2900 .get_rxfh = get_rss_table,
2848 .set_rxfh_indir = set_rss_table, 2901 .set_rxfh = set_rss_table,
2849 .flash_device = set_flash, 2902 .flash_device = set_flash,
2850}; 2903};
2851 2904
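This switches cxgb4 from the old {get,set}_rxfh_indir hooks to the 3.16 {get,set}_rxfh pair, whose extra u8 *key argument is the channel through which drivers can now expose the RSS hash key (here cxgb4 accepts but ignores it, serving only the indirection table). A sketch of the callback contract, assuming a hypothetical my_priv with rss_table/rss_key fields; either pointer may be NULL when userspace asked for only one half:

/* Hypothetical .get_rxfh implementation (fragment; locking omitted). */
static int my_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
	const struct my_priv *p = netdev_priv(dev);	/* made-up priv */
	unsigned int i;

	if (indir)					/* indirection table */
		for (i = 0; i < p->rss_size; i++)
			indir[i] = p->rss_table[i];
	if (key)					/* RSS hash key */
		memcpy(key, p->rss_key, p->rss_key_size);
	return 0;
}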
@@ -3386,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3386EXPORT_SYMBOL(cxgb4_best_mtu); 3439EXPORT_SYMBOL(cxgb4_best_mtu);
3387 3440
3388/** 3441/**
3442 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3443 * @mtus: the HW MTU table
3444 * @header_size: Header Size
3445 * @data_size_max: maximum Data Segment Size
3446 * @data_size_align: desired Data Segment Size Alignment (2^N)
3447 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3448 *
3449 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3450 * MTU Table based solely on a Maximum MTU parameter, we break that
3451 * parameter up into a Header Size and Maximum Data Segment Size, and
3452 * provide a desired Data Segment Size Alignment. If we find an MTU in
3453 * the Hardware MTU Table which will result in a Data Segment Size with
3454 * the requested alignment _and_ that MTU isn't "too far" from the
3455 * closest MTU, then we'll return that rather than the closest MTU.
3456 */
3457unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3458 unsigned short header_size,
3459 unsigned short data_size_max,
3460 unsigned short data_size_align,
3461 unsigned int *mtu_idxp)
3462{
3463 unsigned short max_mtu = header_size + data_size_max;
3464 unsigned short data_size_align_mask = data_size_align - 1;
3465 int mtu_idx, aligned_mtu_idx;
3466
3467 /* Scan the MTU Table till we find an MTU which is larger than our
3468 * Maximum MTU or we reach the end of the table. Along the way,
3469 * record the last MTU found, if any, which will result in a Data
3470 * Segment Length matching the requested alignment.
3471 */
3472 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3473 unsigned short data_size = mtus[mtu_idx] - header_size;
3474
3475 /* If this MTU minus the Header Size would result in a
3476 * Data Segment Size of the desired alignment, remember it.
3477 */
3478 if ((data_size & data_size_align_mask) == 0)
3479 aligned_mtu_idx = mtu_idx;
3480
3481 /* If we're not at the end of the Hardware MTU Table and the
3482 * next element is larger than our Maximum MTU, drop out of
3483 * the loop.
3484 */
3485 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3486 break;
3487 }
3488
3489 /* If we fell out of the loop because we ran to the end of the table,
3490 * then we just have to use the last [largest] entry.
3491 */
3492 if (mtu_idx == NMTUS)
3493 mtu_idx--;
3494
3495 /* If we found an MTU which resulted in the requested Data Segment
3496 * Length alignment and that's "not far" from the largest MTU which is
3497 * less than or equal to the maximum MTU, then use that.
3498 */
3499 if (aligned_mtu_idx >= 0 &&
3500 mtu_idx - aligned_mtu_idx <= 1)
3501 mtu_idx = aligned_mtu_idx;
3502
3503 /* If the caller has passed in an MTU Index pointer, pass the
3504 * MTU Index back. Return the MTU value.
3505 */
3506 if (mtu_idxp)
3507 *mtu_idxp = mtu_idx;
3508 return mtus[mtu_idx];
3509}
3510EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3511
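A stand-alone rerun of the same search with toy inputs (a 40-byte TCP/IP header, 8-byte alignment, and a made-up four-entry MTU table) shows the one-slot tolerance in action:

#include <stdio.h>

int main(void)
{
	const unsigned short mtus[] = { 576, 1488, 1500, 9000 };
	const int nmtus = 4;
	const unsigned short header_size = 40, max_mtu = 1500;
	int idx, aligned = -1;

	for (idx = 0; idx < nmtus; idx++) {
		if (((mtus[idx] - header_size) & 7) == 0)
			aligned = idx;		/* payload 8-byte aligned */
		if (idx + 1 < nmtus && mtus[idx + 1] > max_mtu)
			break;			/* next entry too big */
	}
	if (idx == nmtus)
		idx--;				/* ran off the end */
	if (aligned >= 0 && idx - aligned <= 1)
		idx = aligned;			/* aligned and "not far" */
	printf("chosen MTU %d\n", mtus[idx]);	/* prints 1488 */
	return 0;
}

1500 is the closest entry to max_mtu, but 1488 sits only one slot away and yields an aligned 1448-byte payload, so it wins.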
3512/**
3389 * cxgb4_port_chan - get the HW channel of a port 3513 * cxgb4_port_chan - get the HW channel of a port
3390 * @dev: the net device for the port 3514 * @dev: the net device for the port
3391 * 3515 *
@@ -3782,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3782 lli.mtus = adap->params.mtus; 3906 lli.mtus = adap->params.mtus;
3783 if (uld == CXGB4_ULD_RDMA) { 3907 if (uld == CXGB4_ULD_RDMA) {
3784 lli.rxq_ids = adap->sge.rdma_rxq; 3908 lli.rxq_ids = adap->sge.rdma_rxq;
3909 lli.ciq_ids = adap->sge.rdma_ciq;
3785 lli.nrxq = adap->sge.rdmaqs; 3910 lli.nrxq = adap->sge.rdmaqs;
3911 lli.nciq = adap->sge.rdmaciqs;
3786 } else if (uld == CXGB4_ULD_ISCSI) { 3912 } else if (uld == CXGB4_ULD_ISCSI) {
3787 lli.rxq_ids = adap->sge.ofld_rxq; 3913 lli.rxq_ids = adap->sge.ofld_rxq;
3788 lli.nrxq = adap->sge.ofldqsets; 3914 lli.nrxq = adap->sge.ofldqsets;
@@ -4061,7 +4187,7 @@ static int update_root_dev_clip(struct net_device *dev)
4061 4187
4062 /* Parse all bond and vlan devices layered on top of the physical dev */ 4188 /* Parse all bond and vlan devices layered on top of the physical dev */
4063 for (i = 0; i < VLAN_N_VID; i++) { 4189 for (i = 0; i < VLAN_N_VID; i++) {
4064 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i); 4190 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4065 if (!root_dev) 4191 if (!root_dev)
4066 continue; 4192 continue;
4067 4193
@@ -5528,13 +5654,41 @@ static int adap_init0(struct adapter *adap)
5528#undef FW_PARAM_PFVF 5654#undef FW_PARAM_PFVF
5529#undef FW_PARAM_DEV 5655#undef FW_PARAM_DEV
5530 5656
5531 /* 5657 /* The MTU/MSS Table is initialized by now, so load their values. If
5532 * These are finalized by FW initialization, load their values now. 5658 * we're initializing the adapter, then we'll make any modifications
5659 * we want to the MTU/MSS Table and also initialize the congestion
5660 * parameters.
5533 */ 5661 */
5534 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5662 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5535 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5663 if (state != DEV_STATE_INIT) {
5536 adap->params.b_wnd); 5664 int i;
5665
5666 /* The default MTU Table contains values 1492 and 1500.
5667 * However, for TCP, it's better to have two values which are
5668 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5669 * This allows us to have a TCP Data Payload which is a
5670 * multiple of 8 regardless of what combination of TCP Options
5671 * are in use (always a multiple of 4 bytes) which is
5672 * important for performance reasons. For instance, if no
5673 * options are in use, then we have a 20-byte IP header and a
5674 * 20-byte TCP header. In this case, a 1500-byte MSS would
5675 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5676 * which is not a multiple of 8. So using an MSS of 1488 in
5677 * this case results in a TCP Data Payload of 1448 bytes which
5678 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5679 * Stamps have been negotiated, then an MTU of 1500 bytes
5680 * results in a TCP Data Payload of 1448 bytes which, as
5681 * above, is a multiple of 8 bytes ...
5682 */
5683 for (i = 0; i < NMTUS; i++)
5684 if (adap->params.mtus[i] == 1492) {
5685 adap->params.mtus[i] = 1488;
5686 break;
5687 }
5537 5688
5689 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5690 adap->params.b_wnd);
5691 }
5538 t4_init_tp_params(adap); 5692 t4_init_tp_params(adap);
5539 adap->flags |= FW_OK; 5693 adap->flags |= FW_OK;
5540 return 0; 5694 return 0;
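The comment's arithmetic, condensed into assertions (40 bytes being a bare 20-byte IP header plus 20-byte TCP header, 52 bytes once 12-byte timestamps are negotiated):

#include <assert.h>

int main(void)
{
	assert((1500 - 40) % 8 == 4);	   /* 1460: misaligned payload   */
	assert((1488 - 40) % 8 == 0);	   /* 1448: multiple of 8        */
	assert((1500 - 40 - 12) % 8 == 0); /* with timestamps: 1448 too  */
	return 0;
}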
@@ -5669,12 +5823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
5669 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; 5823 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
5670} 5824}
5671 5825
5672static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, 5826static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5827 unsigned int us, unsigned int cnt,
5673 unsigned int size, unsigned int iqe_size) 5828 unsigned int size, unsigned int iqe_size)
5674{ 5829{
5675 q->intr_params = QINTR_TIMER_IDX(timer_idx) | 5830 q->adap = adap;
5676 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); 5831 set_rspq_intr_params(q, us, cnt);
5677 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5678 q->iqe_len = iqe_size; 5832 q->iqe_len = iqe_size;
5679 q->size = size; 5833 q->size = size;
5680} 5834}
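init_rspq() callers now express holdoff as a (microseconds, packet count) pair instead of raw SGE timer/counter indices; the translation onto the hardware's discrete tables happens in set_rspq_intr_params(), whose body this diff elides (the @@ -2499,24 +2542,34 @@ hunk above). A hedged sketch of the closest-match scan such a mapping typically performs, with timer_val[] standing in for the adapter's holdoff table:

/* Assumed shape only: find the table entry nearest the requested us. */
static unsigned int closest_timer_idx(const unsigned int *timer_val,
				      unsigned int ntimers, unsigned int us)
{
	unsigned int i, best = 0, delta, min_delta = ~0U;

	for (i = 0; i < ntimers; i++) {
		delta = us > timer_val[i] ? us - timer_val[i]
					  : timer_val[i] - us;
		if (delta < min_delta) {
			min_delta = delta;
			best = i;
		}
	}
	return best;		/* index of the nearest holdoff value */
}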
@@ -5688,6 +5842,7 @@ static void cfg_queues(struct adapter *adap)
5688{ 5842{
5689 struct sge *s = &adap->sge; 5843 struct sge *s = &adap->sge;
5690 int i, q10g = 0, n10g = 0, qidx = 0; 5844 int i, q10g = 0, n10g = 0, qidx = 0;
5845 int ciq_size;
5691 5846
5692 for_each_port(adap, i) 5847 for_each_port(adap, i)
5693 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); 5848 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5726,12 +5881,13 @@ static void cfg_queues(struct adapter *adap)
5726 s->ofldqsets = adap->params.nports; 5881 s->ofldqsets = adap->params.nports;
5727 /* For RDMA one Rx queue per channel suffices */ 5882 /* For RDMA one Rx queue per channel suffices */
5728 s->rdmaqs = adap->params.nports; 5883 s->rdmaqs = adap->params.nports;
5884 s->rdmaciqs = adap->params.nports;
5729 } 5885 }
5730 5886
5731 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { 5887 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5732 struct sge_eth_rxq *r = &s->ethrxq[i]; 5888 struct sge_eth_rxq *r = &s->ethrxq[i];
5733 5889
5734 init_rspq(&r->rspq, 0, 0, 1024, 64); 5890 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5735 r->fl.size = 72; 5891 r->fl.size = 72;
5736 } 5892 }
5737 5893
@@ -5747,7 +5903,7 @@ static void cfg_queues(struct adapter *adap)
5747 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { 5903 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5748 struct sge_ofld_rxq *r = &s->ofldrxq[i]; 5904 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5749 5905
5750 init_rspq(&r->rspq, 0, 0, 1024, 64); 5906 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
5751 r->rspq.uld = CXGB4_ULD_ISCSI; 5907 r->rspq.uld = CXGB4_ULD_ISCSI;
5752 r->fl.size = 72; 5908 r->fl.size = 72;
5753 } 5909 }
@@ -5755,13 +5911,26 @@ static void cfg_queues(struct adapter *adap)
5755 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { 5911 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5756 struct sge_ofld_rxq *r = &s->rdmarxq[i]; 5912 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5757 5913
5758 init_rspq(&r->rspq, 0, 0, 511, 64); 5914 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
5759 r->rspq.uld = CXGB4_ULD_RDMA; 5915 r->rspq.uld = CXGB4_ULD_RDMA;
5760 r->fl.size = 72; 5916 r->fl.size = 72;
5761 } 5917 }
5762 5918
5763 init_rspq(&s->fw_evtq, 6, 0, 512, 64); 5919 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5764 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); 5920 if (ciq_size > SGE_MAX_IQ_SIZE) {
5921 CH_WARN(adap, "CIQ size too small for available IQs\n");
5922 ciq_size = SGE_MAX_IQ_SIZE;
5923 }
5924
5925 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5926 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5927
5928 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
5929 r->rspq.uld = CXGB4_ULD_RDMA;
5930 }
5931
5932 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5933 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
5765} 5934}
5766 5935
5767/* 5936/*
@@ -5808,9 +5977,9 @@ static int enable_msix(struct adapter *adap)
5808 5977
5809 want = s->max_ethqsets + EXTRA_VECS; 5978 want = s->max_ethqsets + EXTRA_VECS;
5810 if (is_offload(adap)) { 5979 if (is_offload(adap)) {
5811 want += s->rdmaqs + s->ofldqsets; 5980 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
5812 /* need nchan for each possible ULD */ 5981 /* need nchan for each possible ULD */
5813 ofld_need = 2 * nchan; 5982 ofld_need = 3 * nchan;
5814 } 5983 }
5815 need = adap->params.nports + EXTRA_VECS + ofld_need; 5984 need = adap->params.nports + EXTRA_VECS + ofld_need;
5816 5985
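A worked pass through the new budget, with illustrative numbers not taken from this diff (a 4-port adapter, 32 Ethernet queue sets, EXTRA_VECS assumed to be 2):

/*
 *   want = 32 + 2                      (max_ethqsets + EXTRA_VECS)
 *        + 4 + 4 + 4                   (rdmaqs + rdmaciqs + ofldqsets)
 *        = 46
 *   ofld_need = 3 * nchan = 3 * 4 = 12 (one vector block per ULD class
 *                                       per channel, now including CIQs)
 *   need = nports + EXTRA_VECS + ofld_need = 4 + 2 + 12 = 18
 */

need is the floor below which MSI-X is abandoned; the bump from 2 to 3 blocks per channel is what pays for the new RDMA concentrator IQs.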
@@ -6076,7 +6245,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6076 netdev->priv_flags |= IFF_UNICAST_FLT; 6245 netdev->priv_flags |= IFF_UNICAST_FLT;
6077 6246
6078 netdev->netdev_ops = &cxgb4_netdev_ops; 6247 netdev->netdev_ops = &cxgb4_netdev_ops;
6079 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 6248 netdev->ethtool_ops = &cxgb_ethtool_ops;
6080 } 6249 }
6081 6250
6082 pci_set_drvdata(pdev, adapter); 6251 pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a047528f..55e9daf7f9d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
232 const struct cxgb4_virt_res *vr; /* assorted HW resources */ 232 const struct cxgb4_virt_res *vr; /* assorted HW resources */
233 const unsigned short *mtus; /* MTU table */ 233 const unsigned short *mtus; /* MTU table */
234 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ 234 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
235 const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */
235 unsigned short nrxq; /* # of Rx queues */ 236 unsigned short nrxq; /* # of Rx queues */
236 unsigned short ntxq; /* # of Tx queues */ 237 unsigned short ntxq; /* # of Tx queues */
238 unsigned short nciq; /* # of concentrator IQ */
237 unsigned char nchan:4; /* # of channels */ 239 unsigned char nchan:4; /* # of channels */
238 unsigned char nports:4; /* # of ports */ 240 unsigned char nports:4; /* # of ports */
239 unsigned char wr_cred; /* WR 16-byte credits */ 241 unsigned char wr_cred; /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
274unsigned int cxgb4_port_idx(const struct net_device *dev); 276unsigned int cxgb4_port_idx(const struct net_device *dev);
275unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, 277unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
276 unsigned int *idx); 278 unsigned int *idx);
279unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
280 unsigned short header_size,
281 unsigned short data_size_max,
282 unsigned short data_size_align,
283 unsigned int *mtu_idxp);
277void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, 284void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
278 struct tp_tcp_stats *v6); 285 struct tp_tcp_stats *v6);
279void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, 286void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e249528c8e60..dd4355d248e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1697 return handle_trace_pkt(q->adap, si); 1697 return handle_trace_pkt(q->adap, si);
1698 1698
1699 pkt = (const struct cpl_rx_pkt *)rsp; 1699 pkt = (const struct cpl_rx_pkt *)rsp;
1700 csum_ok = pkt->csum_calc && !pkt->err_vec; 1700 csum_ok = pkt->csum_calc && !pkt->err_vec &&
1701 (q->netdev->features & NETIF_F_RXCSUM);
1701 if ((pkt->l2info & htonl(RXF_TCP)) && 1702 if ((pkt->l2info & htonl(RXF_TCP)) &&
1702 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 1703 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1703 do_gro(rxq, si, pkt); 1704 do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1720 1721
1721 rxq->stats.pkts++; 1722 rxq->stats.pkts++;
1722 1723
1723 if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) && 1724 if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1724 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1725 if (!pkt->ip_frag) { 1725 if (!pkt->ip_frag) {
1726 skb->ip_summed = CHECKSUM_UNNECESSARY; 1726 skb->ip_summed = CHECKSUM_UNNECESSARY;
1727 rxq->stats.rx_cso++; 1727 rxq->stats.rx_cso++;
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2215 iq->cntxt_id = ntohs(c.iqid); 2215 iq->cntxt_id = ntohs(c.iqid);
2216 iq->abs_id = ntohs(c.physiqid); 2216 iq->abs_id = ntohs(c.physiqid);
2217 iq->size--; /* subtract status entry */ 2217 iq->size--; /* subtract status entry */
2218 iq->adap = adap;
2219 iq->netdev = dev; 2218 iq->netdev = dev;
2220 iq->handler = hnd; 2219 iq->handler = hnd;
2221 2220
@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
2515 if (oq->rspq.desc) 2514 if (oq->rspq.desc)
2516 free_rspq_fl(adap, &oq->rspq, &oq->fl); 2515 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2517 } 2516 }
2517 for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
2518 if (oq->rspq.desc)
2519 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2520 }
2518 2521
2519 /* clean up offload Tx queues */ 2522 /* clean up offload Tx queues */
2520 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { 2523 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623be9f1e..71b799b5b0f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
68 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ 68 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
69 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ 69 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
70 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ 70 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
71 SGE_MAX_IQ_SIZE = 65520,
71 72
72 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ 73 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
73 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */ 74 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f2738c710789..973eb11aa98a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
227#define DELACK(x) ((x) << 5) 227#define DELACK(x) ((x) << 5)
228#define ULP_MODE(x) ((x) << 8) 228#define ULP_MODE(x) ((x) << 8)
229#define RCV_BUFSIZ(x) ((x) << 12) 229#define RCV_BUFSIZ(x) ((x) << 12)
230#define RCV_BUFSIZ_MASK 0x3FFU
230#define DSCP(x) ((x) << 22) 231#define DSCP(x) ((x) << 22)
231#define SMAC_SEL(x) ((u64)(x) << 28) 232#define SMAC_SEL(x) ((u64)(x) << 28)
232#define L2T_IDX(x) ((u64)(x) << 36) 233#define L2T_IDX(x) ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
278 __be64 opt0; 279 __be64 opt0;
279}; 280};
280 281
282struct cpl_t5_pass_accept_rpl {
283 WR_HDR;
284 union opcode_tid ot;
285 __be32 opt2;
286 __be64 opt0;
287 __be32 iss;
288 __be32 rsvd;
289};
290
281struct cpl_act_open_req { 291struct cpl_act_open_req {
282 WR_HDR; 292 WR_HDR;
283 union opcode_tid ot; 293 union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 52859288de7b..ff1cdd1788b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2664 netdev->priv_flags |= IFF_UNICAST_FLT; 2664 netdev->priv_flags |= IFF_UNICAST_FLT;
2665 2665
2666 netdev->netdev_ops = &cxgb4vf_netdev_ops; 2666 netdev->netdev_ops = &cxgb4vf_netdev_ops;
2667 SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops); 2667 netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
2668 2668
2669 /* 2669 /*
2670 * Initialize the hardware/software state for the port. 2670 * Initialize the hardware/software state for the port.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9d88c1d50b49..bdfa80ca5e31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1510{ 1510{
1511 struct sk_buff *skb; 1511 struct sk_buff *skb;
1512 const struct cpl_rx_pkt *pkt = (void *)rsp; 1512 const struct cpl_rx_pkt *pkt = (void *)rsp;
1513 bool csum_ok = pkt->csum_calc && !pkt->err_vec; 1513 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1514 (rspq->netdev->features & NETIF_F_RXCSUM);
1514 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1515 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1515 1516
1516 /* 1517 /*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1538 skb_record_rx_queue(skb, rspq->idx); 1539 skb_record_rx_queue(skb, rspq->idx);
1539 rxq->stats.pkts++; 1540 rxq->stats.pkts++;
1540 1541
1541 if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) && 1542 if (csum_ok && !pkt->err_vec &&
1542 !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { 1543 (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
1543 if (!pkt->ip_frag) 1544 if (!pkt->ip_frag)
1544 skb->ip_summed = CHECKSUM_UNNECESSARY; 1545 skb->ip_summed = CHECKSUM_UNNECESSARY;
1545 else { 1546 else {
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e35c8e0202ad..14f465f239d6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
43#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 43#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
44#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 44#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
45 45
46#define ENIC_AIC_LARGE_PKT_DIFF 3
47
46struct enic_msix_entry { 48struct enic_msix_entry {
47 int requested; 49 int requested;
48 char devname[IFNAMSIZ]; 50 char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
50 void *devid; 52 void *devid;
51}; 53};
52 54
55/* Store only the lower range. Higher range is given by fw. */
56struct enic_intr_mod_range {
57 u32 small_pkt_range_start;
58 u32 large_pkt_range_start;
59};
60
61struct enic_intr_mod_table {
62 u32 rx_rate;
63 u32 range_percent;
64};
65
66#define ENIC_MAX_LINK_SPEEDS 3
67#define ENIC_LINK_SPEED_10G 10000
68#define ENIC_LINK_SPEED_4G 4000
69#define ENIC_LINK_40G_INDEX 2
70#define ENIC_LINK_10G_INDEX 1
71#define ENIC_LINK_4G_INDEX 0
72#define ENIC_RX_COALESCE_RANGE_END 125
73#define ENIC_AIC_TS_BREAK 100
74
75struct enic_rx_coal {
76 u32 small_pkt_range_start;
77 u32 large_pkt_range_start;
78 u32 range_end;
79 u32 use_adaptive_rx_coalesce;
80};
81
53/* priv_flags */ 82/* priv_flags */
54#define ENIC_SRIOV_ENABLED (1 << 0) 83#define ENIC_SRIOV_ENABLED (1 << 0)
55 84
@@ -85,13 +114,12 @@ struct enic {
85 u32 msg_enable; 114 u32 msg_enable;
86 spinlock_t devcmd_lock; 115 spinlock_t devcmd_lock;
87 u8 mac_addr[ETH_ALEN]; 116 u8 mac_addr[ETH_ALEN];
88 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
89 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
90 unsigned int flags; 117 unsigned int flags;
91 unsigned int priv_flags; 118 unsigned int priv_flags;
92 unsigned int mc_count; 119 unsigned int mc_count;
93 unsigned int uc_count; 120 unsigned int uc_count;
94 u32 port_mtu; 121 u32 port_mtu;
122 struct enic_rx_coal rx_coalesce_setting;
95 u32 rx_coalesce_usecs; 123 u32 rx_coalesce_usecs;
96 u32 tx_coalesce_usecs; 124 u32 tx_coalesce_usecs;
97#ifdef CONFIG_PCI_IOV 125#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 4b6e5695b263..3e27df522847 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -88,7 +88,7 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
88 return err; 88 return err;
89} 89}
90 90
91int enic_dev_add_addr(struct enic *enic, u8 *addr) 91int enic_dev_add_addr(struct enic *enic, const u8 *addr)
92{ 92{
93 int err; 93 int err;
94 94
@@ -99,7 +99,7 @@ int enic_dev_add_addr(struct enic *enic, u8 *addr)
99 return err; 99 return err;
100} 100}
101 101
102int enic_dev_del_addr(struct enic *enic, u8 *addr) 102int enic_dev_del_addr(struct enic *enic, const u8 *addr)
103{ 103{
104 int err; 104 int err;
105 105
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 129b14a4efb0..36ea1ab25f6a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -45,8 +45,8 @@ int enic_dev_add_station_addr(struct enic *enic);
45int enic_dev_del_station_addr(struct enic *enic); 45int enic_dev_del_station_addr(struct enic *enic);
46int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, 46int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
47 int broadcast, int promisc, int allmulti); 47 int broadcast, int promisc, int allmulti);
48int enic_dev_add_addr(struct enic *enic, u8 *addr); 48int enic_dev_add_addr(struct enic *enic, const u8 *addr);
49int enic_dev_del_addr(struct enic *enic, u8 *addr); 49int enic_dev_del_addr(struct enic *enic, const u8 *addr);
50int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); 50int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
51int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); 51int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
52int enic_dev_notify_unset(struct enic *enic); 52int enic_dev_notify_unset(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 47e3562f4866..2e50b5489d20 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); 79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); 80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
81 81
82void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
83{
84 int i;
85 int intr;
86
87 for (i = 0; i < enic->rq_count; i++) {
88 intr = enic_msix_rq_intr(enic, i);
89 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
90 }
91}
92
82static int enic_get_settings(struct net_device *netdev, 93static int enic_get_settings(struct net_device *netdev,
83 struct ethtool_cmd *ecmd) 94 struct ethtool_cmd *ecmd)
84{ 95{
@@ -93,8 +104,8 @@ static int enic_get_settings(struct net_device *netdev,
93 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); 104 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
94 ecmd->duplex = DUPLEX_FULL; 105 ecmd->duplex = DUPLEX_FULL;
95 } else { 106 } else {
96 ethtool_cmd_speed_set(ecmd, -1); 107 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
97 ecmd->duplex = -1; 108 ecmd->duplex = DUPLEX_UNKNOWN;
98 } 109 }
99 110
100 ecmd->autoneg = AUTONEG_DISABLE; 111 ecmd->autoneg = AUTONEG_DISABLE;
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
178 struct ethtool_coalesce *ecmd) 189 struct ethtool_coalesce *ecmd)
179{ 190{
180 struct enic *enic = netdev_priv(netdev); 191 struct enic *enic = netdev_priv(netdev);
192 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
181 193
182 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; 194 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
183 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; 195 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
196 if (rxcoal->use_adaptive_rx_coalesce)
197 ecmd->use_adaptive_rx_coalesce = 1;
198 ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
199 ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
184 200
185 return 0; 201 return 0;
186} 202}
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
191 struct enic *enic = netdev_priv(netdev); 207 struct enic *enic = netdev_priv(netdev);
192 u32 tx_coalesce_usecs; 208 u32 tx_coalesce_usecs;
193 u32 rx_coalesce_usecs; 209 u32 rx_coalesce_usecs;
210 u32 rx_coalesce_usecs_low;
211 u32 rx_coalesce_usecs_high;
212 u32 coalesce_usecs_max;
194 unsigned int i, intr; 213 unsigned int i, intr;
214 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
195 215
216 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
196 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, 217 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
197 vnic_dev_get_intr_coal_timer_max(enic->vdev)); 218 coalesce_usecs_max);
198 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, 219 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
199 vnic_dev_get_intr_coal_timer_max(enic->vdev)); 220 coalesce_usecs_max);
221
222 rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
223 coalesce_usecs_max);
224 rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
225 coalesce_usecs_max);
200 226
201 switch (vnic_dev_get_intr_mode(enic->vdev)) { 227 switch (vnic_dev_get_intr_mode(enic->vdev)) {
202 case VNIC_DEV_INTR_MODE_INTX: 228 case VNIC_DEV_INTR_MODE_INTX:
203 if (tx_coalesce_usecs != rx_coalesce_usecs) 229 if (tx_coalesce_usecs != rx_coalesce_usecs)
204 return -EINVAL; 230 return -EINVAL;
231 if (ecmd->use_adaptive_rx_coalesce ||
232 ecmd->rx_coalesce_usecs_low ||
233 ecmd->rx_coalesce_usecs_high)
234 return -EOPNOTSUPP;
205 235
206 intr = enic_legacy_io_intr(); 236 intr = enic_legacy_io_intr();
207 vnic_intr_coalescing_timer_set(&enic->intr[intr], 237 vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
210 case VNIC_DEV_INTR_MODE_MSI: 240 case VNIC_DEV_INTR_MODE_MSI:
211 if (tx_coalesce_usecs != rx_coalesce_usecs) 241 if (tx_coalesce_usecs != rx_coalesce_usecs)
212 return -EINVAL; 242 return -EINVAL;
243 if (ecmd->use_adaptive_rx_coalesce ||
244 ecmd->rx_coalesce_usecs_low ||
245 ecmd->rx_coalesce_usecs_high)
246 return -EOPNOTSUPP;
213 247
214 vnic_intr_coalescing_timer_set(&enic->intr[0], 248 vnic_intr_coalescing_timer_set(&enic->intr[0],
215 tx_coalesce_usecs); 249 tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
221 tx_coalesce_usecs); 255 tx_coalesce_usecs);
222 } 256 }
223 257
224 for (i = 0; i < enic->rq_count; i++) { 258 if (rxcoal->use_adaptive_rx_coalesce) {
225 intr = enic_msix_rq_intr(enic, i); 259 if (!ecmd->use_adaptive_rx_coalesce) {
226 vnic_intr_coalescing_timer_set(&enic->intr[intr], 260 rxcoal->use_adaptive_rx_coalesce = 0;
227 rx_coalesce_usecs); 261 enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
262 }
263 } else {
264 if (ecmd->use_adaptive_rx_coalesce)
265 rxcoal->use_adaptive_rx_coalesce = 1;
266 else
267 enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
228 } 268 }
229 269
270 if (ecmd->rx_coalesce_usecs_high) {
271 if (rx_coalesce_usecs_high <
272 (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
273 return -EINVAL;
274 rxcoal->range_end = rx_coalesce_usecs_high;
275 rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
276 rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
277 ENIC_AIC_LARGE_PKT_DIFF;
278 }
230 break; 279 break;
231 default: 280 default:
232 break; 281 break;
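The new branch carves the user's [low, high] window into the two adaptive ranges. With made-up input rx_coalesce_usecs_low = 15 and rx_coalesce_usecs_high = 80:

/*
 *   small_pkt_range_start = 15
 *   large_pkt_range_start = 15 + ENIC_AIC_LARGE_PKT_DIFF = 15 + 3 = 18
 *   range_end             = 80
 *
 * high < low + ENIC_AIC_LARGE_PKT_DIFF is rejected with -EINVAL, so
 * the large-packet range always starts at or below range_end.
 */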
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
253 302
254void enic_set_ethtool_ops(struct net_device *netdev) 303void enic_set_ethtool_ops(struct net_device *netdev)
255{ 304{
256 SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops); 305 netdev->ethtool_ops = &enic_ethtool_ops;
257} 306}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2945718ce806..f32f828b7f3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -38,6 +38,7 @@
38#include <linux/rtnetlink.h> 38#include <linux/rtnetlink.h>
39#include <linux/prefetch.h> 39#include <linux/prefetch.h>
40#include <net/ip6_checksum.h> 40#include <net/ip6_checksum.h>
41#include <linux/ktime.h>
41 42
42#include "cq_enet_desc.h" 43#include "cq_enet_desc.h"
43#include "vnic_dev.h" 44#include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
72MODULE_VERSION(DRV_VERSION); 73MODULE_VERSION(DRV_VERSION);
73MODULE_DEVICE_TABLE(pci, enic_id_table); 74MODULE_DEVICE_TABLE(pci, enic_id_table);
74 75
76#define ENIC_LARGE_PKT_THRESHOLD 1000
77#define ENIC_MAX_COALESCE_TIMERS 10
78/* Interrupt moderation table, which will be used to decide the
79 * coalescing timer values
80 * {rx_rate in Mbps, mapping percentage of the range}
81 */
82struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
83 {4000, 0},
84 {4400, 10},
85 {5060, 20},
86 {5230, 30},
87 {5540, 40},
88 {5820, 50},
89 {6120, 60},
90 {6435, 70},
91 {6745, 80},
92 {7000, 90},
93 {0xFFFFFFFF, 100}
94};
95
96/* This table helps the driver to pick different ranges for rx coalescing
97 * timer depending on the link speed.
98 */
99struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
100 {0, 0}, /* 0 - 4 Gbps */
101 {0, 3}, /* 4 - 10 Gbps */
102 {3, 6}, /* 10 - 40 Gbps */
103};
104
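Tracing one measurement through these tables, assuming a 10 Gbps link (so mod_range index ENIC_LINK_10G_INDEX) and a computed rate of 5500 Mbps made up mostly of large packets:

/*
 *   large_pkt_range_start = mod_range[ENIC_LINK_10G_INDEX].large = 3
 *   first mod_table entry with 5500 < rx_rate is {5540, 40} -> 40%
 *   timer = 3 + (125 - 3) * 40 / 100 = 51 us
 *
 * (125 is ENIC_RX_COALESCE_RANGE_END; the result is then damped by
 * averaging with the previous to-be value before it is programmed.)
 */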
75int enic_is_dynamic(struct enic *enic) 105int enic_is_dynamic(struct enic *enic)
76{ 106{
77 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; 107 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -586,8 +616,71 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
586 return net_stats; 616 return net_stats;
587} 617}
588 618
619static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
620{
621 struct enic *enic = netdev_priv(netdev);
622
623 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
624 unsigned int mc_count = netdev_mc_count(netdev);
625
626 netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
627 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
628
629 return -ENOSPC;
630 }
631
632 enic_dev_add_addr(enic, mc_addr);
633 enic->mc_count++;
634
635 return 0;
636}
637
638static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
639{
640 struct enic *enic = netdev_priv(netdev);
641
642 enic_dev_del_addr(enic, mc_addr);
643 enic->mc_count--;
644
645 return 0;
646}
647
648static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
649{
650 struct enic *enic = netdev_priv(netdev);
651
652 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
653 unsigned int uc_count = netdev_uc_count(netdev);
654
655 netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
656 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
657
658 return -ENOSPC;
659 }
660
661 enic_dev_add_addr(enic, uc_addr);
662 enic->uc_count++;
663
664 return 0;
665}
666
667static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
668{
669 struct enic *enic = netdev_priv(netdev);
670
671 enic_dev_del_addr(enic, uc_addr);
672 enic->uc_count--;
673
674 return 0;
675}
676
589void enic_reset_addr_lists(struct enic *enic) 677void enic_reset_addr_lists(struct enic *enic)
590{ 678{
679 struct net_device *netdev = enic->netdev;
680
681 __dev_uc_unsync(netdev, NULL);
682 __dev_mc_unsync(netdev, NULL);
683
591 enic->mc_count = 0; 684 enic->mc_count = 0;
592 enic->uc_count = 0; 685 enic->uc_count = 0;
593 enic->flags = 0; 686 enic->flags = 0;
@@ -654,112 +747,6 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
654 return enic_dev_add_station_addr(enic); 747 return enic_dev_add_station_addr(enic);
655} 748}
656 749
657static void enic_update_multicast_addr_list(struct enic *enic)
658{
659 struct net_device *netdev = enic->netdev;
660 struct netdev_hw_addr *ha;
661 unsigned int mc_count = netdev_mc_count(netdev);
662 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
663 unsigned int i, j;
664
665 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
666 netdev_warn(netdev, "Registering only %d out of %d "
667 "multicast addresses\n",
668 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
669 mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
670 }
671
672 /* Is there an easier way? Trying to minimize to
673 * calls to add/del multicast addrs. We keep the
674 * addrs from the last call in enic->mc_addr and
675 * look for changes to add/del.
676 */
677
678 i = 0;
679 netdev_for_each_mc_addr(ha, netdev) {
680 if (i == mc_count)
681 break;
682 memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
683 }
684
685 for (i = 0; i < enic->mc_count; i++) {
686 for (j = 0; j < mc_count; j++)
687 if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
688 break;
689 if (j == mc_count)
690 enic_dev_del_addr(enic, enic->mc_addr[i]);
691 }
692
693 for (i = 0; i < mc_count; i++) {
694 for (j = 0; j < enic->mc_count; j++)
695 if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
696 break;
697 if (j == enic->mc_count)
698 enic_dev_add_addr(enic, mc_addr[i]);
699 }
700
701 /* Save the list to compare against next time
702 */
703
704 for (i = 0; i < mc_count; i++)
705 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
706
707 enic->mc_count = mc_count;
708}
709
710static void enic_update_unicast_addr_list(struct enic *enic)
711{
712 struct net_device *netdev = enic->netdev;
713 struct netdev_hw_addr *ha;
714 unsigned int uc_count = netdev_uc_count(netdev);
715 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
716 unsigned int i, j;
717
718 if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
719 netdev_warn(netdev, "Registering only %d out of %d "
720 "unicast addresses\n",
721 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
722 uc_count = ENIC_UNICAST_PERFECT_FILTERS;
723 }
724
725 /* Is there an easier way? Trying to minimize to
726 * calls to add/del unicast addrs. We keep the
727 * addrs from the last call in enic->uc_addr and
728 * look for changes to add/del.
729 */
730
731 i = 0;
732 netdev_for_each_uc_addr(ha, netdev) {
733 if (i == uc_count)
734 break;
735 memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
736 }
737
738 for (i = 0; i < enic->uc_count; i++) {
739 for (j = 0; j < uc_count; j++)
740 if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
741 break;
742 if (j == uc_count)
743 enic_dev_del_addr(enic, enic->uc_addr[i]);
744 }
745
746 for (i = 0; i < uc_count; i++) {
747 for (j = 0; j < enic->uc_count; j++)
748 if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
749 break;
750 if (j == enic->uc_count)
751 enic_dev_add_addr(enic, uc_addr[i]);
752 }
753
754 /* Save the list to compare against next time
755 */
756
757 for (i = 0; i < uc_count; i++)
758 memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
759
760 enic->uc_count = uc_count;
761}
762
763/* netif_tx_lock held, BHs disabled */ 750/* netif_tx_lock held, BHs disabled */
764static void enic_set_rx_mode(struct net_device *netdev) 751static void enic_set_rx_mode(struct net_device *netdev)
765{ 752{
@@ -782,9 +769,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
782 } 769 }
783 770
784 if (!promisc) { 771 if (!promisc) {
785 enic_update_unicast_addr_list(enic); 772 __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
786 if (!allmulti) 773 if (!allmulti)
787 enic_update_multicast_addr_list(enic); 774 __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
788 } 775 }
789} 776}
790 777
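enic_set_rx_mode() now leans on the core's address bookkeeping: __dev_uc_sync()/__dev_mc_sync() remember which addresses each device has already seen and invoke the sync/unsync callbacks only for the delta, which is what lets the driver drop its shadow mc_addr/uc_addr arrays above and keep bare counters. A hedged sketch of the callback shape (my_add/my_del are hypothetical firmware calls; as I read the 3.16 core, a sync callback that returns an error leaves the address unsynced, so it is offered again on the next call):

static int my_uc_sync(struct net_device *netdev, const u8 *addr)
{
	return my_add(netdev_priv(netdev), addr);	/* hypothetical */
}

static int my_uc_unsync(struct net_device *netdev, const u8 *addr)
{
	return my_del(netdev_priv(netdev), addr);	/* hypothetical */
}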
@@ -979,6 +966,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
979 return 0; 966 return 0;
980} 967}
981 968
969static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
970 u32 pkt_len)
971{
972 if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
973 pkt_size->large_pkt_bytes_cnt += pkt_len;
974 else
975 pkt_size->small_pkt_bytes_cnt += pkt_len;
976}
977
982static void enic_rq_indicate_buf(struct vnic_rq *rq, 978static void enic_rq_indicate_buf(struct vnic_rq *rq,
983 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 979 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
984 int skipped, void *opaque) 980 int skipped, void *opaque)
@@ -986,6 +982,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
986 struct enic *enic = vnic_dev_priv(rq->vdev); 982 struct enic *enic = vnic_dev_priv(rq->vdev);
987 struct net_device *netdev = enic->netdev; 983 struct net_device *netdev = enic->netdev;
988 struct sk_buff *skb; 984 struct sk_buff *skb;
985 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
989 986
990 u8 type, color, eop, sop, ingress_port, vlan_stripped; 987 u8 type, color, eop, sop, ingress_port, vlan_stripped;
991 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; 988 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1053,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1056 napi_gro_receive(&enic->napi[q_number], skb); 1053 napi_gro_receive(&enic->napi[q_number], skb);
1057 else 1054 else
1058 netif_receive_skb(skb); 1055 netif_receive_skb(skb);
1056 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1057 enic_intr_update_pkt_size(&cq->pkt_size_counter,
1058 bytes_written);
1059 } else { 1059 } else {
1060 1060
1061 /* Buffer overflow 1061 /* Buffer overflow
@@ -1134,6 +1134,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
1134 return rq_work_done; 1134 return rq_work_done;
1135} 1135}
1136 1136
1137static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
1138{
1139 unsigned int intr = enic_msix_rq_intr(enic, rq->index);
1140 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1141 u32 timer = cq->tobe_rx_coal_timeval;
1142
1143 if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
1144 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
1145 cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
1146 }
1147}
1148
1149static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
1150{
1151 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1152 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1153 struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
1154 int index;
1155 u32 timer;
1156 u32 range_start;
1157 u32 traffic;
1158 u64 delta;
1159 ktime_t now = ktime_get();
1160
1161 delta = ktime_us_delta(now, cq->prev_ts);
1162 if (delta < ENIC_AIC_TS_BREAK)
1163 return;
1164 cq->prev_ts = now;
1165
1166 traffic = pkt_size_counter->large_pkt_bytes_cnt +
1167 pkt_size_counter->small_pkt_bytes_cnt;
1168 /* The table takes Mbps
1169 * traffic *= 8 => bits
1170 * traffic *= (10^6 / delta) => bps
1171 * traffic /= 10^6 => Mbps
1172 *
1173 * Combining, traffic *= (8 / delta)
1174 */
1175
1176 traffic <<= 3;
1177 traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
1178
1179 for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
1180 if (traffic < mod_table[index].rx_rate)
1181 break;
1182 range_start = (pkt_size_counter->small_pkt_bytes_cnt >
1183 pkt_size_counter->large_pkt_bytes_cnt << 1) ?
1184 rx_coal->small_pkt_range_start :
1185 rx_coal->large_pkt_range_start;
1186 timer = range_start + ((rx_coal->range_end - range_start) *
1187 mod_table[index].range_percent / 100);
1188 /* Damping */
1189 cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
1190
1191 pkt_size_counter->large_pkt_bytes_cnt = 0;
1192 pkt_size_counter->small_pkt_bytes_cnt = 0;
1193}
1194
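A quick units check on that conversion: bytes * 8 / delta-in-microseconds is bits per microsecond, which is numerically megabits per second. For example, 125000 bytes observed over delta = 100 us:

/*
 *   traffic = 125000 << 3 = 1000000
 *   traffic / 100         = 10000 Mbps  (10 Gbps)
 *
 * 10000 is not below any finite table rate, so the lookup falls
 * through to {0xFFFFFFFF, 100} and the timer is pushed to the top
 * of its range before damping.
 */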
1137static int enic_poll_msix(struct napi_struct *napi, int budget) 1195static int enic_poll_msix(struct napi_struct *napi, int budget)
1138{ 1196{
1139 struct net_device *netdev = napi->dev; 1197 struct net_device *netdev = napi->dev;
@@ -1171,6 +1229,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1171 1229
1172 if (err) 1230 if (err)
1173 work_done = work_to_do; 1231 work_done = work_to_do;
1232 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1233 /* Call the function which refreshes
1234 * the intr coalescing timer value based on
1235 * the traffic. This is supported only in
1236 * the case of MSI-x mode
1237 */
1238 enic_calc_int_moderation(enic, &enic->rq[rq]);
1174 1239
1175 if (work_done < work_to_do) { 1240 if (work_done < work_to_do) {
1176 1241
@@ -1179,6 +1244,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1179 */ 1244 */
1180 1245
1181 napi_complete(napi); 1246 napi_complete(napi);
1247 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1248 enic_set_int_moderation(enic, &enic->rq[rq]);
1182 vnic_intr_unmask(&enic->intr[intr]); 1249 vnic_intr_unmask(&enic->intr[intr]);
1183 } 1250 }
1184 1251
@@ -1314,6 +1381,42 @@ static void enic_synchronize_irqs(struct enic *enic)
1314 } 1381 }
1315} 1382}
1316 1383
1384static void enic_set_rx_coal_setting(struct enic *enic)
1385{
1386 unsigned int speed;
1387 int index = -1;
1388 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1389
1390 /* If intr mode is not MSIX, do not do adaptive coalescing */
1391 if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
1392 netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
1393 return;
1394 }
1395
1396 /* 1. Read the link speed from fw
1397 * 2. Pick the default range for the speed
1398 * 3. Update it in enic->rx_coalesce_setting
1399 */
1400 speed = vnic_dev_port_speed(enic->vdev);
1401 if (ENIC_LINK_SPEED_10G < speed)
1402 index = ENIC_LINK_40G_INDEX;
1403 else if (ENIC_LINK_SPEED_4G < speed)
1404 index = ENIC_LINK_10G_INDEX;
1405 else
1406 index = ENIC_LINK_4G_INDEX;
1407
1408 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
1409 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
1410 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
1411
1412 /* Start with the value provided by UCSM */
1413 for (index = 0; index < enic->rq_count; index++)
1414 enic->cq[index].cur_rx_coal_timeval =
1415 enic->config.intr_timer_usec;
1416
1417 rx_coal->use_adaptive_rx_coalesce = 1;
1418}
1419
1317static int enic_dev_notify_set(struct enic *enic) 1420static int enic_dev_notify_set(struct enic *enic)
1318{ 1421{
1319 int err; 1422 int err;
@@ -2231,6 +2334,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2231 enic->notify_timer.function = enic_notify_timer; 2334 enic->notify_timer.function = enic_notify_timer;
2232 enic->notify_timer.data = (unsigned long)enic; 2335 enic->notify_timer.data = (unsigned long)enic;
2233 2336
2337 enic_set_rx_coal_setting(enic);
2234 INIT_WORK(&enic->reset, enic_reset); 2338 INIT_WORK(&enic->reset, enic_reset);
2235 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); 2339 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
2236 2340
@@ -2250,6 +2354,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2250 } 2354 }
2251 2355
2252 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 2356 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2357 /* rx coalesce time already got initialized. This gets used
2358 * if adaptive coal is turned off
2359 */
2253 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; 2360 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2254 2361
2255 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2362 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315cbe803..4e6aa65857f7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
50 u32 pad10; 50 u32 pad10;
51}; 51};
52 52
53struct vnic_rx_bytes_counter {
54 unsigned int small_pkt_bytes_cnt;
55 unsigned int large_pkt_bytes_cnt;
56};
57
53struct vnic_cq { 58struct vnic_cq {
54 unsigned int index; 59 unsigned int index;
55 struct vnic_dev *vdev; 60 struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
58 unsigned int to_clean; 63 unsigned int to_clean;
59 unsigned int last_color; 64 unsigned int last_color;
60 unsigned int interrupt_offset; 65 unsigned int interrupt_offset;
66 struct vnic_rx_bytes_counter pkt_size_counter;
67 unsigned int cur_rx_coal_timeval;
68 unsigned int tobe_rx_coal_timeval;
69 ktime_t prev_ts;
61}; 70};
62 71
63static inline unsigned int vnic_cq_service(struct vnic_cq *cq, 72static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 69dd92598b7e..e86a45cb9e68 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -657,7 +657,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
657 return err; 657 return err;
658} 658}
659 659
660int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) 660int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
661{ 661{
662 u64 a0 = 0, a1 = 0; 662 u64 a0 = 0, a1 = 0;
663 int wait = 1000; 663 int wait = 1000;
@@ -674,7 +674,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
674 return err; 674 return err;
675} 675}
676 676
677int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) 677int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
678{ 678{
679 u64 a0 = 0, a1 = 0; 679 u64 a0 = 0, a1 = 0;
680 int wait = 1000; 680 int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index e670029862a1..1f3b301f8225 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -95,8 +95,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
95int vnic_dev_hang_notify(struct vnic_dev *vdev); 95int vnic_dev_hang_notify(struct vnic_dev *vdev);
96int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, 96int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
97 int broadcast, int promisc, int allmulti); 97 int broadcast, int promisc, int allmulti);
98int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); 98int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr);
99int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); 99int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr);
100int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); 100int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
101int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); 101int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
102int vnic_dev_notify_unset(struct vnic_dev *vdev); 102int vnic_dev_notify_unset(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8c4b93be333b..13723c96d1a2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -109,6 +109,7 @@ typedef struct board_info {
109 u8 imr_all; 109 u8 imr_all;
110 110
111 unsigned int flags; 111 unsigned int flags;
112 unsigned int in_timeout:1;
112 unsigned int in_suspend:1; 113 unsigned int in_suspend:1;
113 unsigned int wake_supported:1; 114 unsigned int wake_supported:1;
114 115
@@ -187,13 +188,13 @@ dm9000_reset(board_info_t *db)
187 * The essential point is that we have to do a double reset, and the 188 * The essential point is that we have to do a double reset, and the
188 * instruction is to set LBK into MAC internal loopback mode. 189 * instruction is to set LBK into MAC internal loopback mode.
189 */ 190 */
190 iow(db, DM9000_NCR, 0x03); 191 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
191 udelay(100); /* Application note says at least 20 us */ 192 udelay(100); /* Application note says at least 20 us */
192 if (ior(db, DM9000_NCR) & 1) 193 if (ior(db, DM9000_NCR) & 1)
193 dev_err(db->dev, "dm9000 did not respond to first reset\n"); 194 dev_err(db->dev, "dm9000 did not respond to first reset\n");
194 195
195 iow(db, DM9000_NCR, 0); 196 iow(db, DM9000_NCR, 0);
196 iow(db, DM9000_NCR, 0x03); 197 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
197 udelay(100); 198 udelay(100);
198 if (ior(db, DM9000_NCR) & 1) 199 if (ior(db, DM9000_NCR) & 1)
199 dev_err(db->dev, "dm9000 did not respond to second reset\n"); 200 dev_err(db->dev, "dm9000 did not respond to second reset\n");
@@ -273,7 +274,7 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
273 */ 274 */
274static void dm9000_msleep(board_info_t *db, unsigned int ms) 275static void dm9000_msleep(board_info_t *db, unsigned int ms)
275{ 276{
276 if (db->in_suspend) 277 if (db->in_suspend || db->in_timeout)
277 mdelay(ms); 278 mdelay(ms);
278 else 279 else
279 msleep(ms); 280 msleep(ms);
@@ -334,7 +335,8 @@ dm9000_phy_write(struct net_device *dev,
334 unsigned long reg_save; 335 unsigned long reg_save;
335 336
336 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); 337 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
337 mutex_lock(&db->addr_lock); 338 if (!db->in_timeout)
339 mutex_lock(&db->addr_lock);
338 340
339 spin_lock_irqsave(&db->lock, flags); 341 spin_lock_irqsave(&db->lock, flags);
340 342
@@ -365,7 +367,8 @@ dm9000_phy_write(struct net_device *dev,
365 writeb(reg_save, db->io_addr); 367 writeb(reg_save, db->io_addr);
366 368
367 spin_unlock_irqrestore(&db->lock, flags); 369 spin_unlock_irqrestore(&db->lock, flags);
368 mutex_unlock(&db->addr_lock); 370 if (!db->in_timeout)
371 mutex_unlock(&db->addr_lock);
369} 372}
370 373
371/* dm9000_set_io 374/* dm9000_set_io
@@ -882,6 +885,18 @@ dm9000_hash_table(struct net_device *dev)
882 spin_unlock_irqrestore(&db->lock, flags); 885 spin_unlock_irqrestore(&db->lock, flags);
883} 886}
884 887
888static void
889dm9000_mask_interrupts(board_info_t *db)
890{
891 iow(db, DM9000_IMR, IMR_PAR);
892}
893
894static void
895dm9000_unmask_interrupts(board_info_t *db)
896{
897 iow(db, DM9000_IMR, db->imr_all);
898}
899
885/* 900/*
886 * Initialize dm9000 board 901 * Initialize dm9000 board
887 */ 902 */
@@ -894,6 +909,9 @@ dm9000_init_dm9000(struct net_device *dev)
894 909
895 dm9000_dbg(db, 1, "entering %s\n", __func__); 910 dm9000_dbg(db, 1, "entering %s\n", __func__);
896 911
912 dm9000_reset(db);
913 dm9000_mask_interrupts(db);
914
897 /* I/O mode */ 915 /* I/O mode */
898 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ 916 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
899 917
@@ -941,9 +959,6 @@ dm9000_init_dm9000(struct net_device *dev)
941 959
942 db->imr_all = imr; 960 db->imr_all = imr;
943 961
944 /* Enable TX/RX interrupt mask */
945 iow(db, DM9000_IMR, imr);
946
947 /* Init Driver variable */ 962 /* Init Driver variable */
948 db->tx_pkt_cnt = 0; 963 db->tx_pkt_cnt = 0;
949 db->queue_pkt_len = 0; 964 db->queue_pkt_len = 0;
@@ -959,17 +974,19 @@ static void dm9000_timeout(struct net_device *dev)
959 974
960 /* Save previous register address */ 975 /* Save previous register address */
961 spin_lock_irqsave(&db->lock, flags); 976 spin_lock_irqsave(&db->lock, flags);
977 db->in_timeout = 1;
962 reg_save = readb(db->io_addr); 978 reg_save = readb(db->io_addr);
963 979
964 netif_stop_queue(dev); 980 netif_stop_queue(dev);
965 dm9000_reset(db);
966 dm9000_init_dm9000(dev); 981 dm9000_init_dm9000(dev);
982 dm9000_unmask_interrupts(db);
967 /* We can accept TX packets again */ 983 /* We can accept TX packets again */
968 dev->trans_start = jiffies; /* prevent tx timeout */ 984 dev->trans_start = jiffies; /* prevent tx timeout */
969 netif_wake_queue(dev); 985 netif_wake_queue(dev);
970 986
971 /* Restore previous register address */ 987 /* Restore previous register address */
972 writeb(reg_save, db->io_addr); 988 writeb(reg_save, db->io_addr);
989 db->in_timeout = 0;
973 spin_unlock_irqrestore(&db->lock, flags); 990 spin_unlock_irqrestore(&db->lock, flags);
974} 991}
975 992
@@ -1093,7 +1110,6 @@ dm9000_rx(struct net_device *dev)
1093 if (rxbyte & DM9000_PKT_ERR) { 1110 if (rxbyte & DM9000_PKT_ERR) {
1094 dev_warn(db->dev, "status check fail: %d\n", rxbyte); 1111 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1095 iow(db, DM9000_RCR, 0x00); /* Stop Device */ 1112 iow(db, DM9000_RCR, 0x00); /* Stop Device */
1096 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
1097 return; 1113 return;
1098 } 1114 }
1099 1115
@@ -1193,9 +1209,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1193 /* Save previous register address */ 1209 /* Save previous register address */
1194 reg_save = readb(db->io_addr); 1210 reg_save = readb(db->io_addr);
1195 1211
1196 /* Disable all interrupts */ 1212 dm9000_mask_interrupts(db);
1197 iow(db, DM9000_IMR, IMR_PAR);
1198
1199 /* Got DM9000 interrupt status */ 1213 /* Got DM9000 interrupt status */
1200 int_status = ior(db, DM9000_ISR); /* Got ISR */ 1214 int_status = ior(db, DM9000_ISR); /* Got ISR */
1201 iow(db, DM9000_ISR, int_status); /* Clear ISR status */ 1215 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
@@ -1218,9 +1232,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1218 } 1232 }
1219 } 1233 }
1220 1234
1221 /* Re-enable interrupt mask */ 1235 dm9000_unmask_interrupts(db);
1222 iow(db, DM9000_IMR, db->imr_all);
1223
1224 /* Restore previous register address */ 1236 /* Restore previous register address */
1225 writeb(reg_save, db->io_addr); 1237 writeb(reg_save, db->io_addr);
1226 1238
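The mask/unmask helpers make the handler follow the textbook discipline for a shared, level-triggered line; in outline (this mirrors the hunks above rather than adding new behavior):

	dm9000_mask_interrupts(db);		/* IMR = IMR_PAR only    */
	int_status = ior(db, DM9000_ISR);	/* latch pending causes  */
	iow(db, DM9000_ISR, int_status);	/* acknowledge them      */
	/* ... service RX/TX according to int_status ... */
	dm9000_unmask_interrupts(db);		/* IMR = db->imr_all     */

Masking first guarantees no new cause is raised while the latched ISR snapshot is being serviced.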
@@ -1292,6 +1304,9 @@ dm9000_open(struct net_device *dev)
 	 * may work, and tell the user that this is a problem */
 
 	if (irqflags == IRQF_TRIGGER_NONE)
+		irqflags = irq_get_trigger_type(dev->irq);
+
+	if (irqflags == IRQF_TRIGGER_NONE)
 		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
 
 	irqflags |= IRQF_SHARED;
@@ -1301,11 +1316,14 @@ dm9000_open(struct net_device *dev)
 	mdelay(1);		/* delay needs by DM9000B */
 
 	/* Initialize DM9000 board */
-	dm9000_reset(db);
 	dm9000_init_dm9000(dev);
 
 	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
 		return -EAGAIN;
+	/* Now that we have an interrupt handler hooked up we can unmask
+	 * our interrupts
+	 */
+	dm9000_unmask_interrupts(db);
 
 	/* Init driver variable */
 	db->dbug_cnt = 0;
@@ -1313,7 +1331,8 @@ dm9000_open(struct net_device *dev)
 	mii_check_media(&db->mii, netif_msg_link(db), 1);
 	netif_start_queue(dev);
 
-	dm9000_schedule_poll(db);
+	/* Poll initial link status */
+	schedule_delayed_work(&db->phy_poll, 1);
 
 	return 0;
 }
@@ -1326,7 +1345,7 @@ dm9000_shutdown(struct net_device *dev)
 	/* RESET device */
 	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
 	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
-	iow(db, DM9000_IMR, IMR_PAR);	/* Disable all interrupt */
+	dm9000_mask_interrupts(db);
 	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
 }
 
@@ -1547,12 +1566,7 @@ dm9000_probe(struct platform_device *pdev)
 		db->flags |= DM9000_PLATF_SIMPLE_PHY;
 #endif
 
-	/* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
-	 * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
-	 * while probe stage.
-	 */
-
-	iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+	dm9000_reset(db);
 
 	/* try multiple times, DM9000 sometimes gets the read wrong */
 	for (i = 0; i < 8; i++) {
@@ -1695,8 +1709,8 @@ dm9000_drv_resume(struct device *dev)
 	/* reset if we were not in wake mode to ensure if
 	 * the device was powered off it is in a known state */
 	if (!db->wake_state) {
-		dm9000_reset(db);
 		dm9000_init_dm9000(ndev);
+		dm9000_unmask_interrupts(db);
 	}
 
 	netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1642de78aac8..861660841ce2 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef CONFIG_TULIP_NAPI
 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 #endif
-	SET_ETHTOOL_OPS(dev, &ops);
+	dev->ethtool_ops = &ops;
 
 	if (register_netdev(dev))
 		goto err_out_free_ring;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa801a6af7b9..80afec335a11 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -962,8 +962,8 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
 	}
 	if(db->link_failed)
 	{
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	if (db->media_mode & ULI526X_AUTO)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 4fb756d219f7..1274b6fdac8a 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 #if 0
 	dev->features = NETIF_F_IP_CSUM;
 #endif
@@ -1185,8 +1185,8 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		ethtool_cmd_speed_set(cmd, np->speed);
 		cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(cmd, -1);
-		cmd->duplex = -1;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
 	}
 	if ( np->an_enable)
 		cmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d9e5ca0d48c1..433c1e185442 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
 
 	/* The chip-specific entries in the device structure. */
 	dev->netdev_ops = &netdev_ops;
-	SET_ETHTOOL_OPS(dev, &ethtool_ops);
+	dev->ethtool_ops = &ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 4884205e56ee..056b44b93477 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -134,17 +134,17 @@ struct ec_bhf_priv {
 
 	struct pci_dev *dev;
 
-	void * __iomem io;
-	void * __iomem dma_io;
+	void __iomem *io;
+	void __iomem *dma_io;
 
 	struct hrtimer hrtimer;
 
 	int tx_dma_chan;
 	int rx_dma_chan;
-	void * __iomem ec_io;
-	void * __iomem fifo_io;
-	void * __iomem mii_io;
-	void * __iomem mac_io;
+	void __iomem *ec_io;
+	void __iomem *fifo_io;
+	void __iomem *mii_io;
+	void __iomem *mac_io;
 
 	struct bhf_dma rx_buf;
 	struct rx_desc *rx_descs;
@@ -297,7 +297,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
 {
 	struct device *dev = PRIV_TO_DEV(priv);
 	unsigned block_count, i;
-	void * __iomem ec_info;
+	void __iomem *ec_info;
 
 	dev_dbg(dev, "Info block:\n");
 	dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
@@ -569,8 +569,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct net_device *net_dev;
 	struct ec_bhf_priv *priv;
-	void * __iomem dma_io;
-	void * __iomem io;
+	void __iomem *dma_io;
+	void __iomem *io;
 	int err = 0;
 
 	err = pci_enable_device(dev);
@@ -615,7 +615,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	}
 
 	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
-	if (net_dev == 0) {
+	if (net_dev == NULL) {
 		err = -ENOMEM;
 		goto err_unmap_dma_io;
 	}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7179df..2e7c5553955e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define MAX_VFS			30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN		32
 
+#define RSS_INDIR_TABLE_LEN	128
+#define RSS_HASH_KEY_LEN	40
+
 struct be_dma_mem {
 	void *va;
 	dma_addr_t dma;
@@ -371,6 +374,7 @@ enum vf_state {
 #define BE_FLAGS_LINK_STATUS_INIT		1
 #define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
 #define BE_FLAGS_VLAN_PROMISC			(1 << 4)
+#define BE_FLAGS_MCAST_PROMISC			(1 << 5)
 #define BE_FLAGS_NAPI_ENABLED			(1 << 9)
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
 #define BE_FLAGS_VXLAN_OFFLOADS			(1 << 12)
@@ -409,6 +413,13 @@ struct be_resources {
 	u32 if_cap_flags;
 };
 
+struct rss_info {
+	u64 rss_flags;
+	u8 rsstable[RSS_INDIR_TABLE_LEN];
+	u8 rss_queue[RSS_INDIR_TABLE_LEN];
+	u8 rss_hkey[RSS_HASH_KEY_LEN];
+};
+
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -445,7 +456,7 @@ struct be_adapter {
 	struct be_drv_stats drv_stats;
 	struct be_aic_obj aic_obj[MAX_EVT_QS];
 	u16 vlans_added;
-	u8 vlan_tag[VLAN_N_VID];
+	unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
 	u8 vlan_prio_bmap;	/* Available Priority BitMap */
 	u16 recommended_prio;	/* Recommended Priority */
 	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -507,7 +518,7 @@ struct be_adapter {
 	u32 msg_enable;
 	int be_get_temp_freq;
 	u8 pf_number;
-	u64 rss_flags;
+	struct rss_info rss_info;
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15af0d24..f4ea3490f446 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
 	}
 };
 
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
-			   u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
 {
 	int i;
 	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -120,21 +119,28 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
 	return (void *)addr;
 }
 
-static int be_mcc_compl_process(struct be_adapter *adapter,
-				struct be_mcc_compl *compl)
+static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
 {
-	u16 compl_status, extd_status;
-	struct be_cmd_resp_hdr *resp_hdr;
-	u8 opcode = 0, subsystem = 0;
-
-	/* Just swap the status to host endian; mcc tag is opaquely copied
-	 * from mcc_wrb */
-	be_dws_le_to_cpu(compl, 4);
-
-	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
-		CQE_STATUS_COMPL_MASK;
+	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
+	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
+	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
+	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
+	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
+		return true;
+	else
+		return false;
+}
 
-	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+/* Place holder for all the async MCC cmds wherein the caller is not in a busy
+ * loop (has not issued be_mcc_notify_wait())
+ */
+static void be_async_cmd_process(struct be_adapter *adapter,
+				 struct be_mcc_compl *compl,
+				 struct be_cmd_resp_hdr *resp_hdr)
+{
+	enum mcc_base_status base_status = base_status(compl->status);
+	u8 opcode = 0, subsystem = 0;
 
 	if (resp_hdr) {
 		opcode = resp_hdr->opcode;
@@ -144,61 +150,86 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
 	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
 		complete(&adapter->et_cmd_compl);
-		return 0;
+		return;
 	}
 
-	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
-	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
-	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
-		adapter->flash_status = compl_status;
+	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
+	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
+	    subsystem == CMD_SUBSYSTEM_COMMON) {
+		adapter->flash_status = compl->status;
 		complete(&adapter->et_cmd_compl);
+		return;
 	}
 
-	if (compl_status == MCC_STATUS_SUCCESS) {
-		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
-		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
-		    (subsystem == CMD_SUBSYSTEM_ETH)) {
-			be_parse_stats(adapter);
-			adapter->stats_cmd_sent = false;
-		}
-		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
-		    subsystem == CMD_SUBSYSTEM_COMMON) {
+	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
+	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
+	    subsystem == CMD_SUBSYSTEM_ETH &&
+	    base_status == MCC_STATUS_SUCCESS) {
+		be_parse_stats(adapter);
+		adapter->stats_cmd_sent = false;
+		return;
+	}
+
+	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
+	    subsystem == CMD_SUBSYSTEM_COMMON) {
+		if (base_status == MCC_STATUS_SUCCESS) {
 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
-				(void *)resp_hdr;
+							(void *)resp_hdr;
 			adapter->drv_stats.be_on_die_temperature =
-				resp->on_die_temperature;
-		}
-	} else {
-		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+						resp->on_die_temperature;
+		} else {
 			adapter->be_get_temp_freq = 0;
+		}
+		return;
+	}
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+				struct be_mcc_compl *compl)
+{
+	enum mcc_base_status base_status;
+	enum mcc_addl_status addl_status;
+	struct be_cmd_resp_hdr *resp_hdr;
+	u8 opcode = 0, subsystem = 0;
+
+	/* Just swap the status to host endian; mcc tag is opaquely copied
+	 * from mcc_wrb */
+	be_dws_le_to_cpu(compl, 4);
+
+	base_status = base_status(compl->status);
+	addl_status = addl_status(compl->status);
+
+	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+	if (resp_hdr) {
+		opcode = resp_hdr->opcode;
+		subsystem = resp_hdr->subsystem;
+	}
+
+	be_async_cmd_process(adapter, compl, resp_hdr);
 
-		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
-		    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
-			goto done;
+	if (base_status != MCC_STATUS_SUCCESS &&
+	    !be_skip_err_log(opcode, base_status, addl_status)) {
 
-		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
 			dev_warn(&adapter->pdev->dev,
 				 "VF is not privileged to issue opcode %d-%d\n",
 				 opcode, subsystem);
 		} else {
-			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
-					CQE_STATUS_EXTD_MASK;
 			dev_err(&adapter->pdev->dev,
 				"opcode %d-%d failed:status %d-%d\n",
-				opcode, subsystem, compl_status, extd_status);
-
-			if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
-				return extd_status;
+				opcode, subsystem, base_status, addl_status);
 		}
 	}
-done:
-	return compl_status;
+	return compl->status;
 }
 
 /* Link state evt is a string of bytes; no need for endian swapping */
 static void be_async_link_state_process(struct be_adapter *adapter,
-					struct be_async_event_link_state *evt)
+					struct be_mcc_compl *compl)
 {
+	struct be_async_event_link_state *evt =
+			(struct be_async_event_link_state *)compl;
+
 	/* When link status changes, link speed must be re-queried from FW */
 	adapter->phy.link_speed = -1;
 
@@ -221,8 +252,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
 
 /* Grp5 CoS Priority evt */
 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
-			struct be_async_event_grp5_cos_priority *evt)
+					       struct be_mcc_compl *compl)
 {
+	struct be_async_event_grp5_cos_priority *evt =
+			(struct be_async_event_grp5_cos_priority *)compl;
+
 	if (evt->valid) {
 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
 		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
@@ -233,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 
 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
-			struct be_async_event_grp5_qos_link_speed *evt)
+					    struct be_mcc_compl *compl)
 {
+	struct be_async_event_grp5_qos_link_speed *evt =
+			(struct be_async_event_grp5_qos_link_speed *)compl;
+
 	if (adapter->phy.link_speed >= 0 &&
 	    evt->physical_port == adapter->port_num)
 		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
@@ -242,8 +279,11 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 
 /*Grp5 PVID evt*/
 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
-			struct be_async_event_grp5_pvid_state *evt)
+					     struct be_mcc_compl *compl)
 {
+	struct be_async_event_grp5_pvid_state *evt =
+			(struct be_async_event_grp5_pvid_state *)compl;
+
 	if (evt->enabled) {
 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
 		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
@@ -253,26 +293,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
 }
 
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
-				      u32 trailer, struct be_mcc_compl *evt)
+				      struct be_mcc_compl *compl)
 {
-	u8 event_type = 0;
-
-	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
-			ASYNC_TRAILER_EVENT_TYPE_MASK;
+	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+			ASYNC_EVENT_TYPE_MASK;
 
 	switch (event_type) {
 	case ASYNC_EVENT_COS_PRIORITY:
-		be_async_grp5_cos_priority_process(adapter,
-		(struct be_async_event_grp5_cos_priority *)evt);
-	break;
+		be_async_grp5_cos_priority_process(adapter, compl);
+		break;
 	case ASYNC_EVENT_QOS_SPEED:
-		be_async_grp5_qos_speed_process(adapter,
-		(struct be_async_event_grp5_qos_link_speed *)evt);
-	break;
+		be_async_grp5_qos_speed_process(adapter, compl);
+		break;
 	case ASYNC_EVENT_PVID_STATE:
-		be_async_grp5_pvid_state_process(adapter,
-		(struct be_async_event_grp5_pvid_state *)evt);
-	break;
+		be_async_grp5_pvid_state_process(adapter, compl);
+		break;
 	default:
 		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
 			 event_type);
@@ -281,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
 	}
 }
 
 static void be_async_dbg_evt_process(struct be_adapter *adapter,
-				     u32 trailer, struct be_mcc_compl *cmp)
+				     struct be_mcc_compl *cmp)
 {
 	u8 event_type = 0;
 	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
 
-	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
-			ASYNC_TRAILER_EVENT_TYPE_MASK;
+	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+			ASYNC_EVENT_TYPE_MASK;
 
 	switch (event_type) {
 	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
@@ -302,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
 	}
 }
 
-static inline bool is_link_state_evt(u32 trailer)
+static inline bool is_link_state_evt(u32 flags)
 {
-	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-		ASYNC_TRAILER_EVENT_CODE_MASK) ==
-			ASYNC_EVENT_CODE_LINK_STATE;
+	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+			ASYNC_EVENT_CODE_LINK_STATE;
 }
 
-static inline bool is_grp5_evt(u32 trailer)
+static inline bool is_grp5_evt(u32 flags)
 {
-	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-		ASYNC_TRAILER_EVENT_CODE_MASK) ==
-			ASYNC_EVENT_CODE_GRP_5);
+	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+			ASYNC_EVENT_CODE_GRP_5;
 }
 
-static inline bool is_dbg_evt(u32 trailer)
+static inline bool is_dbg_evt(u32 flags)
 {
-	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-		ASYNC_TRAILER_EVENT_CODE_MASK) ==
-			ASYNC_EVENT_CODE_QNQ);
+	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+			ASYNC_EVENT_CODE_QNQ;
+}
+
+static void be_mcc_event_process(struct be_adapter *adapter,
+				 struct be_mcc_compl *compl)
+{
+	if (is_link_state_evt(compl->flags))
+		be_async_link_state_process(adapter, compl);
+	else if (is_grp5_evt(compl->flags))
+		be_async_grp5_evt_process(adapter, compl);
+	else if (is_dbg_evt(compl->flags))
+		be_async_dbg_evt_process(adapter, compl);
 }
 
 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -362,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
 	spin_lock(&adapter->mcc_cq_lock);
+
 	while ((compl = be_mcc_compl_get(adapter))) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
-			/* Interpret flags as an async trailer */
-			if (is_link_state_evt(compl->flags))
-				be_async_link_state_process(adapter,
-				   (struct be_async_event_link_state *) compl);
-			else if (is_grp5_evt(compl->flags))
-				be_async_grp5_evt_process(adapter,
-						compl->flags, compl);
-			else if (is_dbg_evt(compl->flags))
-				be_async_dbg_evt_process(adapter,
-						compl->flags, compl);
+			be_mcc_event_process(adapter, compl);
 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
 			status = be_mcc_compl_process(adapter, compl);
 			atomic_dec(&mcc_obj->q.used);
 		}
 		be_mcc_compl_use(compl);
 		num++;
@@ -436,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
 	if (status == -EIO)
 		goto out;
 
-	status = resp->status;
+	status = (resp->base_status |
+		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
+		   CQE_ADDL_STATUS_SHIFT));
 out:
 	return status;
 }
@@ -560,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
-		sliport_err1 = ioread32(adapter->db +
-					SLIPORT_ERROR1_OFFSET);
-		sliport_err2 = ioread32(adapter->db +
-					SLIPORT_ERROR2_OFFSET);
+		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+		sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
 
 		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
 		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 		if (stage == POST_STAGE_ARMFW_RDY)
 			return 0;
 
-		dev_info(dev, "Waiting for POST, %ds elapsed\n",
-			 timeout);
+		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
 		if (msleep_interruptible(2000)) {
 			dev_err(dev, "Waiting for POST aborted\n");
 			return -EINTR;
@@ -649,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
 	return &wrb->payload.sgl[0];
 }
 
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
-				 unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
 {
 	wrb->tag0 = addr & 0xFFFFFFFF;
 	wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
 /* Don't touch the hdr after it's prepared */
 /* mem will be NULL for embedded commands */
 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
-				u8 subsystem, u8 opcode, int cmd_len,
-				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+				   u8 subsystem, u8 opcode, int cmd_len,
+				   struct be_mcc_wrb *wrb,
+				   struct be_dma_mem *mem)
 {
 	struct be_sge *sge;
 
@@ -683,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 }
 
 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
-				struct be_dma_mem *mem)
+				      struct be_dma_mem *mem)
 {
 	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
 	u64 dma = (u64)mem->dma;
@@ -868,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+			       NULL);
 
 	/* Support for EQ_CREATEv2 available only SH-R onwards */
 	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+			       NULL);
 	req->type = MAC_ADDRESS_TYPE_NETWORK;
 	if (permanent) {
 		req->permanent = 1;
@@ -940,7 +976,7 @@ err:
 
 /* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-		u32 if_id, u32 *pmac_id, u32 domain)
+		    u32 if_id, u32 *pmac_id, u32 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_pmac_add *req;
@@ -956,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+			       NULL);
 
 	req->hdr.domain = domain;
 	req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1049,7 @@ err:
 
 /* Uses Mbox */
 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
-		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_cq_create *req;
@@ -1028,17 +1065,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 	ctxt = &req->context;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+			       NULL);
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
 	if (BEx_chip(adapter)) {
 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
-								coalesce_wm);
+			      coalesce_wm);
 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
 			      ctxt, no_delay);
 		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
-						__ilog2_u32(cq->len/256));
+			      __ilog2_u32(cq->len / 256));
 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1091,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
 			      ctxt, coalesce_wm);
 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
-								no_delay);
+			      no_delay);
 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-						__ilog2_u32(cq->len/256));
+			      __ilog2_u32(cq->len / 256));
 		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
-								ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
-								ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
 	}
 
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)
 }
 
 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
-			struct be_queue_info *mccq,
-			struct be_queue_info *cq)
+				  struct be_queue_info *mccq,
+				  struct be_queue_info *cq)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1141,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 	ctxt = &req->context;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+			       NULL);
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 	if (BEx_chip(adapter)) {
 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
-				be_encoded_q_len(mccq->len));
+			      be_encoded_q_len(mccq->len));
 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
 	} else {
 		req->hdr.version = 1;
@@ -1145,8 +1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 }
 
 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
-			struct be_queue_info *mccq,
-			struct be_queue_info *cq)
+				  struct be_queue_info *mccq,
+				  struct be_queue_info *cq)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
 	ctxt = &req->context;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+			       NULL);
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
 	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
 	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
-			be_encoded_q_len(mccq->len));
+		      be_encoded_q_len(mccq->len));
 	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
 
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1225,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
 }
 
 int be_cmd_mccq_create(struct be_adapter *adapter,
-			struct be_queue_info *mccq,
-			struct be_queue_info *cq)
+		       struct be_queue_info *mccq, struct be_queue_info *cq)
 {
 	int status;
 
@@ -1213,7 +1250,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
 	req = embedded_payload(&wrb);
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-				OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
 
 	if (lancer_chip(adapter)) {
 		req->hdr.version = 1;
@@ -1250,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
 /* Uses MCC */
 int be_cmd_rxq_create(struct be_adapter *adapter,
-		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
-		u32 if_id, u32 rss, u8 *rss_id)
+		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+		      u32 if_id, u32 rss, u8 *rss_id)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
 
 	req->cq_id = cpu_to_le16(cq_id);
 	req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1332,7 @@ err:
  * Uses Mbox
  */
 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-		int queue_type)
+		     int queue_type)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 	}
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
-				NULL);
+			       NULL);
 	req->id = cpu_to_le16(q->id);
 
 	status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-				OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
 	req->id = cpu_to_le16(q->id);
 
 	status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 
 	req = embedded_payload(&wrb);
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+			       sizeof(*req), &wrb, NULL);
 	req->hdr.domain = domain;
 	req->capability_flags = cpu_to_le32(cap_flags);
 	req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+			       sizeof(*req), wrb, NULL);
 	req->hdr.domain = domain;
 	req->interface_id = cpu_to_le32(interface_id);
 
@@ -1452,7 +1491,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 	hdr = nonemb_cmd->va;
 
 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
-		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+			       nonemb_cmd);
 
 	/* version 1 of the cmd is not supported only by BE2 */
 	if (BE2_chip(adapter))
@@ -1472,7 +1512,7 @@ err:
 
 /* Lancer Stats */
 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
-				struct be_dma_mem *nonemb_cmd)
+			       struct be_dma_mem *nonemb_cmd)
 {
 
 	struct be_mcc_wrb *wrb;
@@ -1493,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
 	req = nonemb_cmd->va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
-			nonemb_cmd);
+			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+			       wrb, nonemb_cmd);
 
 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
 	req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+			       sizeof(*req), wrb, NULL);
 
 	/* version 1 of the cmd is not supported only by BE2 */
 	if (!BE2_chip(adapter))
@@ -1598,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
-		wrb, NULL);
+			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+			       sizeof(*req), wrb, NULL);
 
 	be_mcc_notify(adapter);
 
@@ -1625,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+			       NULL);
 	req->fat_operation = cpu_to_le32(QUERY_FAT);
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
@@ -1655,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 
 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
 	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-			get_fat_cmd.size,
-			&get_fat_cmd.dma);
+					      get_fat_cmd.size,
+					      &get_fat_cmd.dma);
 	if (!get_fat_cmd.va) {
 		status = -ENOMEM;
 		dev_err(&adapter->pdev->dev,
@@ -1679,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 
 		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
-				&get_fat_cmd);
+				       OPCODE_COMMON_MANAGE_FAT, payload_len,
+				       wrb, &get_fat_cmd);
 
 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
 		req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 		if (!status) {
 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
 			memcpy(buf + offset,
-				resp->data_buffer,
-				le32_to_cpu(resp->read_log_length));
+			       resp->data_buffer,
+			       le32_to_cpu(resp->read_log_length));
 		} else {
 			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
 			goto err;
@@ -1702,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 	}
 err:
 	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-			get_fat_cmd.va,
-			get_fat_cmd.dma);
+			    get_fat_cmd.va, get_fat_cmd.dma);
 	spin_unlock_bh(&adapter->mcc_lock);
 }
 
 /* Uses synchronous mcc */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-			char *fw_on_flash)
+		      char *fw_on_flash)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+			       NULL);
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1801,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+			       NULL);
 
 	req->num_eq = cpu_to_le32(num);
 	for (i = 0; i < num; i++) {
@@ -1777,7 +1820,7 @@ err:
 
 /* Uses sycnhronous mcc */
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-			u32 num, bool promiscuous)
+		       u32 num)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+			       wrb, NULL);
 
 	req->interface_id = if_id;
-	req->promiscuous = promiscuous;
 	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
 	req->num_vlan = num;
-	if (!promiscuous) {
-		memcpy(req->normal_vlan, vtag_array,
-			req->num_vlan * sizeof(vtag_array[0]));
-	}
+	memcpy(req->normal_vlan, vtag_array,
+	       req->num_vlan * sizeof(vtag_array[0]));
 
 	status = be_mcc_notify_wait(adapter);
-
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -1827,18 +1867,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 	}
 	memset(req, 0, sizeof(*req));
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
-				wrb, mem);
+			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+			       wrb, mem);
 
 	req->if_id = cpu_to_le32(adapter->if_handle);
 	if (flags & IFF_PROMISC) {
 		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-					BE_IF_FLAGS_VLAN_PROMISCUOUS |
-					BE_IF_FLAGS_MCAST_PROMISCUOUS);
+						 BE_IF_FLAGS_VLAN_PROMISCUOUS |
+						 BE_IF_FLAGS_MCAST_PROMISCUOUS);
 		if (value == ON)
-			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-						BE_IF_FLAGS_VLAN_PROMISCUOUS |
-						BE_IF_FLAGS_MCAST_PROMISCUOUS);
+			req->if_flags =
+				cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+					    BE_IF_FLAGS_VLAN_PROMISCUOUS |
+					    BE_IF_FLAGS_MCAST_PROMISCUOUS);
 	} else if (flags & IFF_ALLMULTI) {
 		req->if_flags_mask = req->if_flags =
 				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1908,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 	}
 
 	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
-	     req->if_flags_mask) {
+	    req->if_flags_mask) {
 		dev_warn(&adapter->pdev->dev,
 			 "Cannot set rx filter flags 0x%x\n",
 			 req->if_flags_mask);
@@ -1905,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+			       wrb, NULL);
 
 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+			       wrb, NULL);
 
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
@@ -1968,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+			       sizeof(*req), wrb, NULL);
 
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
@@ -2011,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+			       NULL);
 
 	status = be_mbox_notify_wait(adapter);
 
@@ -2020,47 +2065,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 }
 
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-			u32 rss_hash_opts, u16 table_size)
+		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_rss_config *req;
-	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
-			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
-			0x3ea83c02, 0x4a110304};
 	int status;
 
 	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
 		return 0;
 
-	if (mutex_lock_interruptible(&adapter->mbox_lock))
-		return -1;
+	spin_lock_bh(&adapter->mcc_lock);
 
-	wrb = wrb_from_mbox(adapter);
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
 
 	req->if_id = cpu_to_le32(adapter->if_handle);
 	req->enable_rss = cpu_to_le16(rss_hash_opts);
 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
 
-	if (lancer_chip(adapter) || skyhawk_chip(adapter))
+	if (!BEx_chip(adapter))
 		req->hdr.version = 1;
 
 	memcpy(req->cpu_table, rsstable, table_size);
-	memcpy(req->hash, myhash, sizeof(myhash));
+	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
 
-	status = be_mbox_notify_wait(adapter);
-
-	mutex_unlock(&adapter->mbox_lock);
+	status = be_mcc_notify_wait(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
 
 /* Uses sync mcc */
 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
-			u8 bcn, u8 sts, u8 state)
+			    u8 bcn, u8 sts, u8 state)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+			       sizeof(*req), wrb, NULL);
 
 	req->port_num = port_num;
 	req->beacon_state = state;
@@ -2107,7 +2153,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+			       wrb, NULL);
 
 	req->port_num = port_num;
 
@@ -2146,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-				OPCODE_COMMON_WRITE_OBJECT,
-				sizeof(struct lancer_cmd_req_write_object), wrb,
-				NULL);
+			       OPCODE_COMMON_WRITE_OBJECT,
+			       sizeof(struct lancer_cmd_req_write_object), wrb,
+			       NULL);
 
 	ctxt = &req->context;
 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-			write_length, ctxt, data_size);
+		      write_length, ctxt, data_size);
 
 	if (data_size == 0)
 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-				eof, ctxt, 1);
+			      eof, ctxt, 1);
 	else
 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-				eof, ctxt, 0);
+			      eof, ctxt, 0);
 
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 	req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2214,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	req->descriptor_count = cpu_to_le32(1);
 	req->buf_len = cpu_to_le32(data_size);
 	req->addr_low = cpu_to_le32((cmd->dma +
-				sizeof(struct lancer_cmd_req_write_object))
-				& 0xFFFFFFFF);
+				     sizeof(struct lancer_cmd_req_write_object))
+				     & 0xFFFFFFFF);
 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
 				sizeof(struct lancer_cmd_req_write_object)));
 
@@ -2197,8 +2244,8 @@ err_unlock:
 }
 
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
-		u32 data_size, u32 data_offset, const char *obj_name,
-		u32 *data_read, u32 *eof, u8 *addn_status)
+			   u32 data_size, u32 data_offset, const char *obj_name,
+			   u32 *data_read, u32 *eof, u8 *addn_status)
 {
 	struct be_mcc_wrb *wrb;
 	struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_READ_OBJECT,
-			sizeof(struct lancer_cmd_req_read_object), wrb,
-			NULL);
+			       OPCODE_COMMON_READ_OBJECT,
+			       sizeof(struct lancer_cmd_req_read_object), wrb,
+			       NULL);
 
 	req->desired_read_len = cpu_to_le32(data_size);
 	req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2291,7 @@ err_unlock:
 }
 
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
-			u32 flash_type, u32 flash_opcode, u32 buf_size)
+			  u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_write_flashrom *req;
@@ -2261,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	req = cmd->va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+			       cmd);
 
 	req->params.op_type = cpu_to_le32(flash_type);
 	req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2284,7 +2332,7 @@ err_unlock:
 }
 
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-			 int offset)
+			 u16 optype, int offset)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_read_flash_crc *req;
@@ -2303,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
 			       wrb, NULL);
 
-	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
+	req->params.op_type = cpu_to_le32(optype);
 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
 	req->params.offset = cpu_to_le32(offset);
 	req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2318,7 +2366,7 @@ err:
 }
 
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
-			struct be_dma_mem *nonemb_cmd)
+			    struct be_dma_mem *nonemb_cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
 	req = nonemb_cmd->va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
-			       nonemb_cmd);
+			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+			       wrb, nonemb_cmd);
 	memcpy(req->magic_mac, mac, ETH_ALEN);
 
 	status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
-			       NULL);
+			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+			       wrb, NULL);
 
 	req->src_port = port_num;
 	req->dest_port = port_num;
@@ -2378,7 +2426,8 @@ err:
 }
 
 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
-			 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
+			 u64 pattern)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2445,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+			       NULL);
 
 	req->hdr.timeout = cpu_to_le32(15);
 	req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2471,7 @@ err:
 }
 
 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
-		u32 byte_cnt, struct be_dma_mem *cmd)
+			u32 byte_cnt, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
 	}
 	req = cmd->va;
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+			       cmd);
 
 	req->pattern = cpu_to_le64(pattern);
 	req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2516,7 @@ err:
 }
 
 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
-			struct be_dma_mem *nonemb_cmd)
+			    struct be_dma_mem *nonemb_cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 	req = nonemb_cmd->va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
-			nonemb_cmd);
+			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+			       nonemb_cmd);
 
 	status = be_mcc_notify_wait(adapter);
 
@@ -2510,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 		goto err;
 	}
 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-					&cmd.dma);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -2521,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 	req = cmd.va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
-			wrb, &cmd);
+			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+			       wrb, &cmd);
 
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
@@ -2544,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 				BE_SUPPORTED_SPEED_1GBPS;
 		}
 	}
-	pci_free_consistent(adapter->pdev, cmd.size,
-				cmd.va, cmd.dma);
+	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -2568,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
 
 	req->hdr.domain = domain;
 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
 	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-						&attribs_cmd.dma);
+					      &attribs_cmd.dma);
 	if (!attribs_cmd.va) {
-		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure\n");
+		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
 		goto err;
 	}
@@ -2613,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 	req = attribs_cmd.va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
-			&attribs_cmd);
+			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+			       wrb, &attribs_cmd);
 
 	status = be_mbox_notify_wait(adapter);
 	if (!status) {
@@ -2649,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+			       sizeof(*req), wrb, NULL);
 
 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
 					   CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
 	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-			get_mac_list_cmd.size,
-			&get_mac_list_cmd.dma);
+						   get_mac_list_cmd.size,
+						   &get_mac_list_cmd.dma);
 
 	if (!get_mac_list_cmd.va) {
 		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure during GET_MAC_LIST\n");
+			"Memory allocation failure during GET_MAC_LIST\n");
 		return -ENOMEM;
 	}
 
@@ -2831,18 +2880,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 		/* If no active mac_id found, return first mac addr */
 		*pmac_id_valid = false;
 		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
-								ETH_ALEN);
+		       ETH_ALEN);
 	}
 
 out:
 	spin_unlock_bh(&adapter->mcc_lock);
 	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-			get_mac_list_cmd.va, get_mac_list_cmd.dma);
+			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
 	return status;
 }
 
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
-			  u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+			  u8 *mac, u32 if_handle, bool active, u32 domain)
 {
 
 	if (!active)
@@ -2892,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-			&cmd.dma, GFP_KERNEL);
+				    &cmd.dma, GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -2906,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
 	req = cmd.va;
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
-				wrb, &cmd);
+			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+			       wrb, &cmd);
 
 	req->hdr.domain = domain;
 	req->mac_count = mac_count;
@@ -2917,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 	status = be_mcc_notify_wait(adapter);
 
 err:
-	dma_free_coherent(&adapter->pdev->dev, cmd.size,
-				cmd.va, cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -2963,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
 	ctxt = &req->context;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+			       NULL);
 
 	req->hdr.domain = domain;
 	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3058,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 	ctxt = &req->context;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+			       NULL);
 
 	req->hdr.domain = domain;
 	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 	if (!status) {
 		struct be_cmd_resp_get_hsw_config *resp =
 						embedded_payload(wrb);
-		be_dws_le_to_cpu(&resp->context,
-				sizeof(resp->context));
+		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
-					pvid, &resp->context);
+				    pvid, &resp->context);
 		if (pvid)
 			*pvid = le16_to_cpu(vid);
 		if (mode)
@@ -3062,11 +3111,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-					&cmd.dma);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
 	if (!cmd.va) {
-		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure\n");
+		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
 		goto err;
 	}
@@ -3349,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-					&cmd.dma);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -3396,7 +3442,7 @@ err:
 
 /* Uses mbox */
 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-					u8 domain, struct be_dma_mem *cmd)
+					  u8 domain, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
 
 /* Uses sync mcc */
 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-					u8 domain, struct be_dma_mem *cmd)
+					  u8 domain, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3530,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 	resp = cmd.va;
 	desc_count = le32_to_cpu(resp->desc_count);
 
 	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
-				 desc_count);
+				desc_count);
 	if (pcie)
 		res->max_vfs = le16_to_cpu(pcie->num_vfs);
 
@@ -3548,33 +3594,47 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
 	nic->cq_count = 0xFFFF;
 	nic->toe_conn_count = 0xFFFF;
 	nic->eq_count = 0xFFFF;
+	nic->iface_count = 0xFFFF;
 	nic->link_param = 0xFF;
+	nic->channel_id_param = cpu_to_le16(0xF000);
 	nic->acpi_params = 0xFF;
 	nic->wol_param = 0x0F;
-	nic->bw_min = 0xFFFFFFFF;
+	nic->tunnel_iface_count = 0xFFFF;
+	nic->direct_tenant_iface_count = 0xFFFF;
 	nic->bw_max = 0xFFFFFFFF;
 }
 
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
+		      u8 domain)
 {
-	if (lancer_chip(adapter)) {
-		struct be_nic_res_desc nic_desc;
+	struct be_nic_res_desc nic_desc;
+	u32 bw_percent;
+	u16 version = 0;
+
+	if (BE3_chip(adapter))
+		return be_cmd_set_qos(adapter, max_rate / 10, domain);
 
-		be_reset_nic_desc(&nic_desc);
+	be_reset_nic_desc(&nic_desc);
+	nic_desc.pf_num = adapter->pf_number;
+	nic_desc.vf_num = domain;
+	if (lancer_chip(adapter)) {
 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
 		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
 				 (1 << NOSV_SHIFT);
-		nic_desc.pf_num = adapter->pf_number;
-		nic_desc.vf_num = domain;
-		nic_desc.bw_max = cpu_to_le32(bps);
-
-		return be_cmd_set_profile_config(adapter, &nic_desc,
-						 RESOURCE_DESC_SIZE_V0,
-						 0, domain);
+		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
 	} else {
-		return be_cmd_set_qos(adapter, bps, domain);
+		version = 1;
+		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+		nic_desc.bw_max = cpu_to_le32(bw_percent);
 	}
+
+	return be_cmd_set_profile_config(adapter, &nic_desc,
+					 nic_desc.hdr.desc_len,
+					 version, domain);
 }
 
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3859,7 +3919,7 @@ err:
 }
 
 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
-			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
 {
 	struct be_adapter *adapter = netdev_priv(netdev_handle);
 	struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d53c1c9..3e0a6b243806 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -50,7 +50,7 @@ struct be_mcc_wrb {
 #define CQE_FLAGS_CONSUMED_MASK		(1 << 27)
 
 /* Completion Status */
-enum {
+enum mcc_base_status {
 	MCC_STATUS_SUCCESS = 0,
 	MCC_STATUS_FAILED = 1,
 	MCC_STATUS_ILLEGAL_REQUEST = 2,
@@ -60,12 +60,25 @@
 	MCC_STATUS_NOT_SUPPORTED = 66
 };
 
-#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES	0x16
+/* Additional status */
+enum mcc_addl_status {
+	MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
+	MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
+	MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+};
+
+#define CQE_BASE_STATUS_MASK		0xFFFF
+#define CQE_BASE_STATUS_SHIFT		0	/* bits 0 - 15 */
+#define CQE_ADDL_STATUS_MASK		0xFF
+#define CQE_ADDL_STATUS_SHIFT		16	/* bits 16 - 31 */
 
-#define CQE_STATUS_COMPL_MASK		0xFFFF
-#define CQE_STATUS_COMPL_SHIFT		0	/* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK		0xFFFF
-#define CQE_STATUS_EXTD_SHIFT		16	/* bits 16 - 31 */
+#define base_status(status)		\
+	((enum mcc_base_status)		\
+	 (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
+#define addl_status(status)		\
+	((enum mcc_addl_status)		\
+	 (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \
+			CQE_ADDL_STATUS_MASK : 0))
 
 struct be_mcc_compl {
 	u32 status;		/* dword 0 */
@@ -74,13 +87,13 @@ struct be_mcc_compl {
 	u32 flags;		/* dword 3 */
 };
 
-/* When the async bit of mcc_compl is set, the last 4 bytes of
- * mcc_compl is interpreted as follows:
+/* When the async bit of mcc_compl flags is set, flags
+ * is interpreted as follows:
  */
-#define ASYNC_TRAILER_EVENT_CODE_SHIFT	8	/* bits 8 - 15 */
-#define ASYNC_TRAILER_EVENT_CODE_MASK	0xFF
-#define ASYNC_TRAILER_EVENT_TYPE_SHIFT	16
-#define ASYNC_TRAILER_EVENT_TYPE_MASK	0xFF
+#define ASYNC_EVENT_CODE_SHIFT		8	/* bits 8 - 15 */
+#define ASYNC_EVENT_CODE_MASK		0xFF
+#define ASYNC_EVENT_TYPE_SHIFT		16
+#define ASYNC_EVENT_TYPE_MASK		0xFF
 #define ASYNC_EVENT_CODE_LINK_STATE	0x1
 #define ASYNC_EVENT_CODE_GRP_5		0x5
 #define ASYNC_EVENT_QOS_SPEED		0x1
@@ -89,10 +102,6 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_CODE_QNQ		0x6
 #define ASYNC_DEBUG_EVENT_TYPE_QNQ	1
 
-struct be_async_event_trailer {
-	u32 code;
-};
-
 enum {
 	LINK_DOWN	= 0x0,
 	LINK_UP		= 0x1
@@ -100,7 +109,7 @@ enum {
 #define LINK_STATUS_MASK		0x1
 #define LOGICAL_LINK_STATUS_MASK	0x2
 
-/* When the event code of an async trailer is link-state, the mcc_compl
+/* When the event code of compl->flags is link-state, the mcc_compl
  * must be interpreted as follows
  */
 struct be_async_event_link_state {
@@ -110,10 +119,10 @@ struct be_async_event_link_state {
 	u8 port_speed;
 	u8 port_fault;
 	u8 rsvd0[7];
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
+/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
  * the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_qos_link_speed {
@@ -121,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {
 	u8 rsvd[5];
 	u16 qos_link_speed;
 	u32 event_tag;
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
  * CoS-Priority, the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_cos_priority {
@@ -134,10 +143,10 @@ struct be_async_event_grp5_cos_priority {
 	u8 valid;
 	u8 rsvd0;
 	u8 event_tag;
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
  * PVID state, the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_pvid_state {
@@ -146,7 +155,7 @@ struct be_async_event_grp5_pvid_state {
 	u16 tag;
 	u32 event_tag;
 	u32 rsvd1;
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
 /* async event indicating outer VLAN tag in QnQ */
@@ -156,7 +165,7 @@ struct be_async_event_qnq {
 	u16 vlan_tag;
 	u32 event_tag;
 	u8 rsvd1[4];
-	struct be_async_event_trailer trailer;
+	u32 flags;
 } __packed;
 
 struct be_mcc_mailbox {
@@ -258,8 +267,8 @@ struct be_cmd_resp_hdr {
 	u8 opcode;		/* dword 0 */
 	u8 subsystem;		/* dword 0 */
 	u8 rsvd[2];		/* dword 0 */
-	u8 status;		/* dword 1 */
-	u8 add_status;		/* dword 1 */
+	u8 base_status;		/* dword 1 */
+	u8 addl_status;		/* dword 1 */
 	u8 rsvd1[2];		/* dword 1 */
 	u32 response_length;	/* dword 2 */
 	u32 actual_resp_len;	/* dword 3 */
@@ -1186,7 +1195,8 @@ struct be_cmd_read_flash_crc {
 	struct flashrom_params params;
 	u8 crc[4];
 	u8 rsvd[4];
-};
+} __packed;
+
 /**************** Lancer Firmware Flash ************/
 struct amap_lancer_write_obj_context {
 	u8 write_length[24];
@@ -1891,16 +1901,20 @@ struct be_nic_res_desc {
 	u16 cq_count;
 	u16 toe_conn_count;
 	u16 eq_count;
-	u32 rsvd5;
+	u16 vlan_id;
+	u16 iface_count;
 	u32 cap_flags;
 	u8 link_param;
-	u8 rsvd6[3];
+	u8 rsvd6;
+	u16 channel_id_param;
 	u32 bw_min;
 	u32 bw_max;
 	u8 acpi_params;
 	u8 wol_param;
 	u16 rsvd7;
-	u32 rsvd8[7];
+	u16 tunnel_iface_count;
+	u16 direct_tenant_iface_count;
+	u32 rsvd8[6];
 } __packed;
 
 /************ Multi-Channel type ***********/
@@ -2060,7 +2074,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
 		      char *fw_on_flash);
 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-		       u32 num, bool promiscuous);
+		       u32 num);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2082,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 			u32 *function_mode, u32 *function_caps, u16 *asic_rev);
 int be_cmd_reset_function(struct be_adapter *adapter);
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-		      u32 rss_hash_opts, u16 table_size);
+		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
 int be_process_mcc(struct be_adapter *adapter);
 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
 			    u8 status, u8 state);
@@ -2084,7 +2098,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			   u32 data_size, u32 data_offset, const char *obj_name,
 			   u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-			 int offset);
+			 u16 optype, int offset);
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
 			    struct be_dma_mem *nonemb_cmd);
 int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2101,7 +2115,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 			u8 loopback_type, u8 enable);
 int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
+		      u16 link_speed, u8 domain);
 void be_detect_error(struct be_adapter *adapter);
 int be_cmd_get_die_temperature(struct be_adapter *adapter);
 int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96cba65d..e2da4d20dd3d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
 	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
 	{DRVSTAT_RX_INFO(rx_compl)},
+	{DRVSTAT_RX_INFO(rx_compl_err)},
 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
 	/* Number of page allocation failures while posting receive buffers
 	 * to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
 #define BE_NO_LOOPBACK 0xff
 
 static void be_get_drvinfo(struct net_device *netdev,
-				struct ethtool_drvinfo *drvinfo)
+			   struct ethtool_drvinfo *drvinfo)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
 	drvinfo->eedump_len = 0;
 }
 
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
 {
 	u32 data_read = 0, eof;
 	u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
 	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
 	/* data_offset and data_size should be 0 to get reg len */
 	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
-			file_name, &data_read, &eof, &addn_status);
+					file_name, &data_read, &eof,
+					&addn_status);
 
 	return data_read;
 }
 
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
-		u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+				u32 buf_len, void *buf)
 {
 	struct be_dma_mem read_cmd;
 	u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 
 	read_cmd.size = LANCER_READ_FILE_CHUNK;
 	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-			&read_cmd.dma);
+					   &read_cmd.dma);
 
 	if (!read_cmd.va) {
 		dev_err(&adapter->pdev->dev,
-				"Memory allocation failure while reading dump\n");
+			"Memory allocation failure while reading dump\n");
 		return -ENOMEM;
 	}
 
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 				     LANCER_READ_FILE_CHUNK);
 		chunk_size = ALIGN(chunk_size, 4);
 		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
-				total_read_len, file_name, &read_len,
-				&eof, &addn_status);
+						total_read_len, file_name,
+						&read_len, &eof, &addn_status);
 		if (!status) {
 			memcpy(buf + total_read_len, read_cmd.va, read_len);
 			total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 		}
 	}
 	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-			read_cmd.dma);
+			    read_cmd.dma);
 
 	return status;
 }
 
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
 	if (be_physfn(adapter)) {
 		if (lancer_chip(adapter))
 			log_size = lancer_cmd_get_file_len(adapter,
-					LANCER_FW_DUMP_FILE);
+							   LANCER_FW_DUMP_FILE);
 		else
 			be_cmd_get_reg_len(adapter, &log_size);
 	}
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
 		memset(buf, 0, regs->len);
 		if (lancer_chip(adapter))
 			lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
-					regs->len, buf);
+					     regs->len, buf);
 		else
 			be_cmd_get_regs(adapter, regs->len, buf);
 	}
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
 	return 0;
 }
 
-static void
-be_get_ethtool_stats(struct net_device *netdev,
-		struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+				 struct ethtool_stats *stats, uint64_t *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
 	}
 }
 
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
-		uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+				uint8_t *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
 	adapter->rx_fc = ecmd->rx_pause;
 
 	status = be_cmd_set_flow_control(adapter,
-			adapter->tx_fc, adapter->rx_fc);
+					 adapter->tx_fc, adapter->rx_fc);
 	if (status)
 		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
 
 	return status;
 }
 
-static int
-be_set_phys_id(struct net_device *netdev,
-		enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+			  enum ethtool_phys_id_state state)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
 	return status;
 }
 
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
 {
 	int ret, i;
 	struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
 
 	for (i = 0; i < 2; i++) {
 		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
-					4096, &ddrdma_cmd);
+					  4096, &ddrdma_cmd);
 		if (ret != 0)
 			goto err;
 	}
@@ -773,20 +766,17 @@ err:
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
-			u64 *status)
+			    u64 *status)
 {
-	be_cmd_set_loopback(adapter, adapter->hba_port_num,
-				loopback_type, 1);
+	be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
-				loopback_type, 1500,
-				2, 0xabc);
-	be_cmd_set_loopback(adapter, adapter->hba_port_num,
-				BE_NO_LOOPBACK, 1);
+				       loopback_type, 1500, 2, 0xabc);
+	be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
 	return *status;
 }
 
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+			 u64 *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
-		if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
-				&data[0]) != 0)
+		if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
 			test->flags |= ETH_TEST_FL_FAILED;
 
-		if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
-				&data[1]) != 0)
+		if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
 			test->flags |= ETH_TEST_FL_FAILED;
 
 		if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 	}
 }
 
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	return be_load_fw(adapter, efl->data);
 }
 
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
 	if (lancer_chip(adapter)) {
 		if (be_physfn(adapter))
 			return lancer_cmd_get_file_len(adapter,
-					LANCER_VPD_PF_FILE);
+						       LANCER_VPD_PF_FILE);
 		else
 			return lancer_cmd_get_file_len(adapter,
-					LANCER_VPD_VF_FILE);
+						       LANCER_VPD_VF_FILE);
 	} else {
 		return BE_READ_SEEPROM_LEN;
 	}
 }
 
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
-		uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+			  struct ethtool_eeprom *eeprom, uint8_t *data)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	if (lancer_chip(adapter)) {
 		if (be_physfn(adapter))
 			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
-					eeprom->len, data);
+						    eeprom->len, data);
 		else
 			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
-					eeprom->len, data);
+						    eeprom->len, data);
 	}
 
 	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 
 	switch (flow_type) {
 	case TCP_V4_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V4_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case TCP_V6_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V6_FLOW:
-		if (adapter->rss_flags & RSS_ENABLE_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
 			data |= RXH_IP_DST | RXH_IP_SRC;
-		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	}
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 }
 
 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
-		u32 *rule_locs)
+			u32 *rule_locs)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
 	struct be_rx_obj *rxo;
 	int status = 0, i, j;
 	u8 rsstable[128];
-	u32 rss_flags = adapter->rss_flags;
+	u32 rss_flags = adapter->rss_info.rss_flags;
 
 	if (cmd->data != L3_RSS_FLAGS &&
 	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
 		return -EINVAL;
 	}
 
-	if (rss_flags == adapter->rss_flags)
+	if (rss_flags == adapter->rss_info.rss_flags)
 		return status;
 
 	if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
 			}
 		}
 	}
-	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+	status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+				   rss_flags, 128, adapter->rss_info.rss_hkey);
 	if (!status)
-		adapter->rss_flags = rss_flags;
+		adapter->rss_info.rss_flags = rss_flags;
 
 	return status;
 }
@@ -1103,6 +1090,69 @@ static int be_set_channels(struct net_device *netdev,
1103 return be_update_queues(adapter); 1090 return be_update_queues(adapter);
1104} 1091}
1105 1092
1093static u32 be_get_rxfh_indir_size(struct net_device *netdev)
1094{
1095 return RSS_INDIR_TABLE_LEN;
1096}
1097
1098static u32 be_get_rxfh_key_size(struct net_device *netdev)
1099{
1100 return RSS_HASH_KEY_LEN;
1101}
1102
1103static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
1104{
1105 struct be_adapter *adapter = netdev_priv(netdev);
1106 int i;
1107 struct rss_info *rss = &adapter->rss_info;
1108
1109 if (indir) {
1110 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
1111 indir[i] = rss->rss_queue[i];
1112 }
1113
1114 if (hkey)
1115 memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
1116
1117 return 0;
1118}
1119
1120static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
1121 const u8 *hkey)
1122{
1123 int rc = 0, i, j;
1124 struct be_adapter *adapter = netdev_priv(netdev);
1125 u8 rsstable[RSS_INDIR_TABLE_LEN];
1126
1127 if (indir) {
1128 struct be_rx_obj *rxo;
1129 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
1130 j = indir[i];
1131 rxo = &adapter->rx_obj[j];
1132 rsstable[i] = rxo->rss_id;
1133 adapter->rss_info.rss_queue[i] = j;
1134 }
1135 } else {
1136 memcpy(rsstable, adapter->rss_info.rsstable,
1137 RSS_INDIR_TABLE_LEN);
1138 }
1139
1140 if (!hkey)
1141 hkey = adapter->rss_info.rss_hkey;
1142
1143 rc = be_cmd_rss_config(adapter, rsstable,
1144 adapter->rss_info.rss_flags,
1145 RSS_INDIR_TABLE_LEN, hkey);
1146 if (rc) {
1147 adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
1148 return -EIO;
1149 }
1150 memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
1151 memcpy(adapter->rss_info.rsstable, rsstable,
1152 RSS_INDIR_TABLE_LEN);
1153 return 0;
1154}
1155
1106const struct ethtool_ops be_ethtool_ops = { 1156const struct ethtool_ops be_ethtool_ops = {
1107 .get_settings = be_get_settings, 1157 .get_settings = be_get_settings,
1108 .get_drvinfo = be_get_drvinfo, 1158 .get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1179,10 @@ const struct ethtool_ops be_ethtool_ops = {
1129 .self_test = be_self_test, 1179 .self_test = be_self_test,
1130 .get_rxnfc = be_get_rxnfc, 1180 .get_rxnfc = be_get_rxnfc,
1131 .set_rxnfc = be_set_rxnfc, 1181 .set_rxnfc = be_set_rxnfc,
1182 .get_rxfh_indir_size = be_get_rxfh_indir_size,
1183 .get_rxfh_key_size = be_get_rxfh_key_size,
1184 .get_rxfh = be_get_rxfh,
1185 .set_rxfh = be_set_rxfh,
1132 .get_channels = be_get_channels, 1186 .get_channels = be_get_channels,
1133 .set_channels = be_set_channels 1187 .set_channels = be_set_channels
1134}; 1188};
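The four callbacks added to be_ethtool_ops above follow the generic ethtool RXFH contract: report the indirection-table and hash-key sizes, then copy either piece in or out, with a NULL pointer meaning "not requested". A minimal sketch of that contract under those assumptions; every foo_-prefixed identifier is hypothetical, not part of benet:

/* Sketch of the ethtool RXFH callbacks; foo_* names are illustrative. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define FOO_INDIR_LEN	128	/* RSS indirection-table entries */
#define FOO_KEY_LEN	40	/* RSS hash-key bytes */

struct foo_priv {
	u32 rss_queue[FOO_INDIR_LEN];	/* queue chosen per table slot */
	u8 rss_key[FOO_KEY_LEN];	/* current hash key */
};

static u32 foo_get_rxfh_indir_size(struct net_device *netdev)
{
	return FOO_INDIR_LEN;
}

static u32 foo_get_rxfh_key_size(struct net_device *netdev)
{
	return FOO_KEY_LEN;
}

static int foo_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
{
	struct foo_priv *priv = netdev_priv(netdev);
	int i;

	if (indir)	/* caller may ask for table, key, or both */
		for (i = 0; i < FOO_INDIR_LEN; i++)
			indir[i] = priv->rss_queue[i];
	if (hkey)
		memcpy(hkey, priv->rss_key, FOO_KEY_LEN);
	return 0;
}

Caching both pieces in the adapter (the rss_info struct above) is what lets set_rxfh reprogram the hardware when only a new key is supplied, falling back to the stored table, and vice versa.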
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3bd198550edb..8840c64aaeca 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -188,10 +188,14 @@
188#define OPTYPE_FCOE_FW_ACTIVE 10 188#define OPTYPE_FCOE_FW_ACTIVE 10
189#define OPTYPE_FCOE_FW_BACKUP 11 189#define OPTYPE_FCOE_FW_BACKUP 11
190#define OPTYPE_NCSI_FW 13 190#define OPTYPE_NCSI_FW 13
191#define OPTYPE_REDBOOT_DIR 18
192#define OPTYPE_REDBOOT_CONFIG 19
193#define OPTYPE_SH_PHY_FW 21
194#define OPTYPE_FLASHISM_JUMPVECTOR 22
195#define OPTYPE_UFI_DIR 23
191#define OPTYPE_PHY_FW 99 196#define OPTYPE_PHY_FW 99
192#define TN_8022 13 197#define TN_8022 13
193 198
194#define ILLEGAL_IOCTL_REQ 2
195#define FLASHROM_OPER_PHY_FLASH 9 199#define FLASHROM_OPER_PHY_FLASH 9
196#define FLASHROM_OPER_PHY_SAVE 10 200#define FLASHROM_OPER_PHY_SAVE 10
197#define FLASHROM_OPER_FLASH 1 201#define FLASHROM_OPER_FLASH 1
@@ -250,6 +254,9 @@
250#define IMAGE_FIRMWARE_BACKUP_FCoE 178 254#define IMAGE_FIRMWARE_BACKUP_FCoE 178
251#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179 255#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
252#define IMAGE_FIRMWARE_PHY 192 256#define IMAGE_FIRMWARE_PHY 192
257#define IMAGE_REDBOOT_DIR 208
258#define IMAGE_REDBOOT_CONFIG 209
259#define IMAGE_UFI_DIR 210
253#define IMAGE_BOOT_CODE 224 260#define IMAGE_BOOT_CODE 224
254 261
255/************* Rx Packet Type Encoding **************/ 262/************* Rx Packet Type Encoding **************/
@@ -534,7 +541,8 @@ struct flash_section_entry {
534 u32 image_size; 541 u32 image_size;
535 u32 cksum; 542 u32 cksum;
536 u32 entry_point; 543 u32 entry_point;
537 u32 rsvd0; 544 u16 optype;
545 u16 rsvd0;
538 u32 rsvd1; 546 u32 rsvd1;
539 u8 ver_data[32]; 547 u8 ver_data[32];
540} __packed; 548} __packed;
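The split of rsvd0 above gives each flash section entry its own 16-bit operation type; images built by older tools leave the field at 0xFFFF, which the flashing code in be_main.c (next file) takes to mean "derive the optype from the image type instead". A hedged sketch of reading the field under that convention; the helper name is illustrative:

/* Illustrative only: flash section data is little-endian, and 0xFFFF
 * marks an old-format image with no per-section optype.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static bool foo_section_optype(u16 raw, u16 *optype)
{
	u16 v = le16_to_cpu(raw);

	if (v == 0xFFFF)
		return false;	/* fall back to image-type mapping */
	*optype = v;
	return true;
}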
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dc19bc5dec77..6822b3d76d85 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
134} 134}
135 135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, 136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
137 u16 len, u16 entry_size) 137 u16 len, u16 entry_size)
138{ 138{
139 struct be_dma_mem *mem = &q->dma_mem; 139 struct be_dma_mem *mem = &q->dma_mem;
140 140
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
154 u32 reg, enabled; 154 u32 reg, enabled;
155 155
156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, 156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157 &reg); 157 &reg);
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159 159
160 if (!enabled && enable) 160 if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
165 return; 165 return;
166 166
167 pci_write_config_dword(adapter->pdev, 167 pci_write_config_dword(adapter->pdev,
168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); 168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
169} 169}
170 170
171static void be_intr_set(struct be_adapter *adapter, bool enable) 171static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
206} 206}
207 207
208static void be_eq_notify(struct be_adapter *adapter, u16 qid, 208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
209 bool arm, bool clear_int, u16 num_popped) 209 bool arm, bool clear_int, u16 num_popped)
210{ 210{
211 u32 val = 0; 211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK; 212 val |= qid & DB_EQ_RING_ID_MASK;
213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << 213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
214 DB_EQ_RING_ID_EXT_MASK_SHIFT);
215 214
216 if (adapter->eeh_error) 215 if (adapter->eeh_error)
217 return; 216 return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
477 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; 476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
478 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; 477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
479 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; 478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
480 if (be_roce_supported(adapter)) { 479 if (be_roce_supported(adapter)) {
481 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; 480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
482 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; 481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
483 drvs->rx_roce_frames = port_stats->roce_frames_received; 482 drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
491{ 490{
492 491
493 struct be_drv_stats *drvs = &adapter->drv_stats; 492 struct be_drv_stats *drvs = &adapter->drv_stats;
494 struct lancer_pport_stats *pport_stats = 493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
495 pport_stats_from_cmd(adapter);
496 494
497 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats)); 495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
498 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; 496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
539} 537}
540 538
541static void populate_erx_stats(struct be_adapter *adapter, 539static void populate_erx_stats(struct be_adapter *adapter,
542 struct be_rx_obj *rxo, 540 struct be_rx_obj *rxo, u32 erx_stat)
543 u32 erx_stat)
544{ 541{
545 if (!BEx_chip(adapter)) 542 if (!BEx_chip(adapter))
546 rx_stats(rxo)->rx_drops_no_frags = erx_stat; 543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
579} 576}
580 577
581static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
582 struct rtnl_link_stats64 *stats) 579 struct rtnl_link_stats64 *stats)
583{ 580{
584 struct be_adapter *adapter = netdev_priv(netdev); 581 struct be_adapter *adapter = netdev_priv(netdev);
585 struct be_drv_stats *drvs = &adapter->drv_stats; 582 struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
660} 657}
661 658
662static void be_tx_stats_update(struct be_tx_obj *txo, 659static void be_tx_stats_update(struct be_tx_obj *txo,
663 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) 660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
664{ 662{
665 struct be_tx_stats *stats = tx_stats(txo); 663 struct be_tx_stats *stats = tx_stats(txo);
666 664
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
676 674
677/* Determine number of WRB entries needed to xmit data in an skb */ 675/* Determine number of WRB entries needed to xmit data in an skb */
678static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, 676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
679 bool *dummy) 677 bool *dummy)
680{ 678{
681 int cnt = (skb->len > skb->data_len); 679 int cnt = (skb->len > skb->data_len);
682 680
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
704} 702}
705 703
706static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, 704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
707 struct sk_buff *skb) 705 struct sk_buff *skb)
708{ 706{
709 u8 vlan_prio; 707 u8 vlan_prio;
710 u16 vlan_tag; 708 u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
733} 731}
734 732
735static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 733static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
736 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) 734 struct sk_buff *skb, u32 wrb_cnt, u32 len,
735 bool skip_hw_vlan)
737{ 736{
738 u16 vlan_tag, proto; 737 u16 vlan_tag, proto;
739 738
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
774} 773}
775 774
776static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
777 bool unmap_single) 776 bool unmap_single)
778{ 777{
779 dma_addr_t dma; 778 dma_addr_t dma;
780 779
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
791} 790}
792 791
793static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 792static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
794 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, 793 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
795 bool skip_hw_vlan) 794 bool skip_hw_vlan)
796{ 795{
797 dma_addr_t busaddr; 796 dma_addr_t busaddr;
798 int i, copied = 0; 797 int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
821 } 820 }
822 821
823 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
824 const struct skb_frag_struct *frag = 823 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
825 &skb_shinfo(skb)->frags[i];
826 busaddr = skb_frag_dma_map(dev, frag, 0, 824 busaddr = skb_frag_dma_map(dev, frag, 0,
827 skb_frag_size(frag), DMA_TO_DEVICE); 825 skb_frag_size(frag), DMA_TO_DEVICE);
828 if (dma_mapping_error(dev, busaddr)) 826 if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
927 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
928} 926}
929 927
930static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, 928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
931 struct sk_buff *skb)
932{ 929{
933 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); 930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
934} 931}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
959 */ 956 */
960 if (be_pvid_tagging_enabled(adapter) && 957 if (be_pvid_tagging_enabled(adapter) &&
961 veh->h_vlan_proto == htons(ETH_P_8021Q)) 958 veh->h_vlan_proto == htons(ETH_P_8021Q))
962 *skip_hw_vlan = true; 959 *skip_hw_vlan = true;
963 960
964 /* HW has a bug wherein it will calculate CSUM for VLAN 961 /* HW has a bug wherein it will calculate CSUM for VLAN
965 * pkts even though it is disabled. 962 * pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1077{ 1074{
1078 struct be_adapter *adapter = netdev_priv(netdev); 1075 struct be_adapter *adapter = netdev_priv(netdev);
1079 if (new_mtu < BE_MIN_MTU || 1076 if (new_mtu < BE_MIN_MTU ||
1080 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - 1077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
1081 (ETH_HLEN + ETH_FCS_LEN))) {
1082 dev_info(&adapter->pdev->dev, 1078 dev_info(&adapter->pdev->dev,
1083 "MTU must be between %d and %d bytes\n", 1079 "MTU must be between %d and %d bytes\n",
1084 BE_MIN_MTU, 1080 BE_MIN_MTU,
1085 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); 1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1086 return -EINVAL; 1082 return -EINVAL;
1087 } 1083 }
1088 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", 1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1089 netdev->mtu, new_mtu); 1085 netdev->mtu, new_mtu);
1090 netdev->mtu = new_mtu; 1086 netdev->mtu = new_mtu;
1091 return 0; 1087 return 0;
1092} 1088}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1098static int be_vid_config(struct be_adapter *adapter) 1094static int be_vid_config(struct be_adapter *adapter)
1099{ 1095{
1100 u16 vids[BE_NUM_VLANS_SUPPORTED]; 1096 u16 vids[BE_NUM_VLANS_SUPPORTED];
1101 u16 num = 0, i; 1097 u16 num = 0, i = 0;
1102 int status = 0; 1098 int status = 0;
1103 1099
1104 /* No need to further configure vids if in promiscuous mode */ 1100 /* No need to further configure vids if in promiscuous mode */
@@ -1109,16 +1105,14 @@ static int be_vid_config(struct be_adapter *adapter)
1109 goto set_vlan_promisc; 1105 goto set_vlan_promisc;
1110 1106
1111 /* Construct VLAN Table to give to HW */ 1107 /* Construct VLAN Table to give to HW */
1112 for (i = 0; i < VLAN_N_VID; i++) 1108 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1113 if (adapter->vlan_tag[i]) 1109 vids[num++] = cpu_to_le16(i);
1114 vids[num++] = cpu_to_le16(i);
1115
1116 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1117 vids, num, 0);
1118 1110
1111 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
1119 if (status) { 1112 if (status) {
1120 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1113 /* Set to VLAN promisc mode as setting VLAN filter failed */
1121 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) 1114 if (addl_status(status) ==
1115 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1122 goto set_vlan_promisc; 1116 goto set_vlan_promisc;
1123 dev_err(&adapter->pdev->dev, 1117 dev_err(&adapter->pdev->dev,
1124 "Setting HW VLAN filtering failed.\n"); 1118 "Setting HW VLAN filtering failed.\n");
@@ -1160,16 +1154,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1160 if (lancer_chip(adapter) && vid == 0) 1154 if (lancer_chip(adapter) && vid == 0)
1161 return status; 1155 return status;
1162 1156
1163 if (adapter->vlan_tag[vid]) 1157 if (test_bit(vid, adapter->vids))
1164 return status; 1158 return status;
1165 1159
1166 adapter->vlan_tag[vid] = 1; 1160 set_bit(vid, adapter->vids);
1167 adapter->vlans_added++; 1161 adapter->vlans_added++;
1168 1162
1169 status = be_vid_config(adapter); 1163 status = be_vid_config(adapter);
1170 if (status) { 1164 if (status) {
1171 adapter->vlans_added--; 1165 adapter->vlans_added--;
1172 adapter->vlan_tag[vid] = 0; 1166 clear_bit(vid, adapter->vids);
1173 } 1167 }
1174 1168
1175 return status; 1169 return status;
@@ -1184,12 +1178,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1184 if (lancer_chip(adapter) && vid == 0) 1178 if (lancer_chip(adapter) && vid == 0)
1185 goto ret; 1179 goto ret;
1186 1180
1187 adapter->vlan_tag[vid] = 0; 1181 clear_bit(vid, adapter->vids);
1188 status = be_vid_config(adapter); 1182 status = be_vid_config(adapter);
1189 if (!status) 1183 if (!status)
1190 adapter->vlans_added--; 1184 adapter->vlans_added--;
1191 else 1185 else
1192 adapter->vlan_tag[vid] = 1; 1186 set_bit(vid, adapter->vids);
1193ret: 1187ret:
1194 return status; 1188 return status;
1195} 1189}
@@ -1197,7 +1191,7 @@ ret:
1197static void be_clear_promisc(struct be_adapter *adapter) 1191static void be_clear_promisc(struct be_adapter *adapter)
1198{ 1192{
1199 adapter->promiscuous = false; 1193 adapter->promiscuous = false;
1200 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; 1194 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
1201 1195
1202 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 1196 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1203} 1197}
@@ -1222,10 +1216,8 @@ static void be_set_rx_mode(struct net_device *netdev)
1222 1216
1223 /* Enable multicast promisc if num configured exceeds what we support */ 1217 /* Enable multicast promisc if num configured exceeds what we support */
1224 if (netdev->flags & IFF_ALLMULTI || 1218 if (netdev->flags & IFF_ALLMULTI ||
1225 netdev_mc_count(netdev) > be_max_mc(adapter)) { 1219 netdev_mc_count(netdev) > be_max_mc(adapter))
1226 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 1220 goto set_mcast_promisc;
1227 goto done;
1228 }
1229 1221
1230 if (netdev_uc_count(netdev) != adapter->uc_macs) { 1222 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1231 struct netdev_hw_addr *ha; 1223 struct netdev_hw_addr *ha;
@@ -1251,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)
1251 } 1243 }
1252 1244
1253 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); 1245 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1254 1246 if (!status) {
1255 /* Set to MCAST promisc mode if setting MULTICAST address fails */ 1247 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1256 if (status) { 1248 adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
1257 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n"); 1249 goto done;
1258 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1259 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1260 } 1250 }
1251
1252set_mcast_promisc:
1253 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1254 return;
1255
1256 /* Set to MCAST promisc mode if setting MULTICAST address fails
1257 * or if num configured exceeds what we support
1258 */
1259 status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1260 if (!status)
1261 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
1261done: 1262done:
1262 return; 1263 return;
1263} 1264}
@@ -1287,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1287 1288
1288 if (status) 1289 if (status)
1289 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 1290 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1290 mac, vf); 1291 mac, vf);
1291 else 1292 else
1292 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 1293 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1293 1294
@@ -1295,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1295} 1296}
1296 1297
1297static int be_get_vf_config(struct net_device *netdev, int vf, 1298static int be_get_vf_config(struct net_device *netdev, int vf,
1298 struct ifla_vf_info *vi) 1299 struct ifla_vf_info *vi)
1299{ 1300{
1300 struct be_adapter *adapter = netdev_priv(netdev); 1301 struct be_adapter *adapter = netdev_priv(netdev);
1301 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1302 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1307,7 +1308,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1307 return -EINVAL; 1308 return -EINVAL;
1308 1309
1309 vi->vf = vf; 1310 vi->vf = vf;
1310 vi->tx_rate = vf_cfg->tx_rate; 1311 vi->max_tx_rate = vf_cfg->tx_rate;
1312 vi->min_tx_rate = 0;
1311 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; 1313 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1312 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; 1314 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1313 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1315 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
@@ -1316,8 +1318,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1316 return 0; 1318 return 0;
1317} 1319}
1318 1320
1319static int be_set_vf_vlan(struct net_device *netdev, 1321static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1320 int vf, u16 vlan, u8 qos)
1321{ 1322{
1322 struct be_adapter *adapter = netdev_priv(netdev); 1323 struct be_adapter *adapter = netdev_priv(netdev);
1323 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1324 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,11 +1349,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
1348 return status; 1349 return status;
1349} 1350}
1350 1351
1351static int be_set_vf_tx_rate(struct net_device *netdev, 1352static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1352 int vf, int rate) 1353 int min_tx_rate, int max_tx_rate)
1353{ 1354{
1354 struct be_adapter *adapter = netdev_priv(netdev); 1355 struct be_adapter *adapter = netdev_priv(netdev);
1355 int status = 0; 1356 struct device *dev = &adapter->pdev->dev;
1357 int percent_rate, status = 0;
1358 u16 link_speed = 0;
1359 u8 link_status;
1356 1360
1357 if (!sriov_enabled(adapter)) 1361 if (!sriov_enabled(adapter))
1358 return -EPERM; 1362 return -EPERM;
@@ -1360,18 +1364,50 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1360 if (vf >= adapter->num_vfs) 1364 if (vf >= adapter->num_vfs)
1361 return -EINVAL; 1365 return -EINVAL;
1362 1366
1363 if (rate < 100 || rate > 10000) { 1367 if (min_tx_rate)
1364 dev_err(&adapter->pdev->dev,
1365 "tx rate must be between 100 and 10000 Mbps\n");
1366 return -EINVAL; 1368 return -EINVAL;
1369
1370 if (!max_tx_rate)
1371 goto config_qos;
1372
1373 status = be_cmd_link_status_query(adapter, &link_speed,
1374 &link_status, 0);
1375 if (status)
1376 goto err;
1377
1378 if (!link_status) {
1379 dev_err(dev, "TX-rate setting not allowed when link is down\n");
1380 status = -EPERM;
1381 goto err;
1382 }
1383
1384 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1385 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1386 link_speed);
1387 status = -EINVAL;
1388 goto err;
1389 }
1390
1391 /* On Skyhawk the QOS setting must be done only as a % value */
1392 percent_rate = link_speed / 100;
1393 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1394 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1395 percent_rate);
1396 status = -EINVAL;
1397 goto err;
1367 } 1398 }
1368 1399
1369 status = be_cmd_config_qos(adapter, rate / 10, vf + 1); 1400config_qos:
1401 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
1370 if (status) 1402 if (status)
1371 dev_err(&adapter->pdev->dev, 1403 goto err;
1372 "tx rate %d on VF %d failed\n", rate, vf); 1404
1373 else 1405 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1374 adapter->vf_cfg[vf].tx_rate = rate; 1406 return 0;
1407
1408err:
1409 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1410 max_tx_rate, vf);
1375 return status; 1411 return status;
1376} 1412}
1377static int be_set_vf_link_state(struct net_device *netdev, int vf, 1413static int be_set_vf_link_state(struct net_device *netdev, int vf,
@@ -1469,7 +1505,7 @@ modify_eqd:
1469} 1505}
1470 1506
1471static void be_rx_stats_update(struct be_rx_obj *rxo, 1507static void be_rx_stats_update(struct be_rx_obj *rxo,
1472 struct be_rx_compl_info *rxcp) 1508 struct be_rx_compl_info *rxcp)
1473{ 1509{
1474 struct be_rx_stats *stats = rx_stats(rxo); 1510 struct be_rx_stats *stats = rx_stats(rxo);
1475 1511
@@ -1566,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1566 skb_frag_set_page(skb, 0, page_info->page); 1602 skb_frag_set_page(skb, 0, page_info->page);
1567 skb_shinfo(skb)->frags[0].page_offset = 1603 skb_shinfo(skb)->frags[0].page_offset =
1568 page_info->page_offset + hdr_len; 1604 page_info->page_offset + hdr_len;
1569 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); 1605 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1606 curr_frag_len - hdr_len);
1570 skb->data_len = curr_frag_len - hdr_len; 1607 skb->data_len = curr_frag_len - hdr_len;
1571 skb->truesize += rx_frag_size; 1608 skb->truesize += rx_frag_size;
1572 skb->tail += hdr_len; 1609 skb->tail += hdr_len;
@@ -1725,8 +1762,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1725 if (rxcp->vlanf) { 1762 if (rxcp->vlanf) {
1726 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq, 1763 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1727 compl); 1764 compl);
1728 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, 1765 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1729 compl); 1766 vlan_tag, compl);
1730 } 1767 }
1731 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); 1768 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1732 rxcp->tunneled = 1769 rxcp->tunneled =
@@ -1757,8 +1794,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1757 if (rxcp->vlanf) { 1794 if (rxcp->vlanf) {
1758 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, 1795 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1759 compl); 1796 compl);
1760 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, 1797 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1761 compl); 1798 vlan_tag, compl);
1762 } 1799 }
1763 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); 1800 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1764 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, 1801 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1836,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1799 rxcp->vlan_tag = swab16(rxcp->vlan_tag); 1836 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1800 1837
1801 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && 1838 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1802 !adapter->vlan_tag[rxcp->vlan_tag]) 1839 !test_bit(rxcp->vlan_tag, adapter->vids))
1803 rxcp->vlanf = 0; 1840 rxcp->vlanf = 0;
1804 } 1841 }
1805 1842
@@ -1915,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1915} 1952}
1916 1953
1917static u16 be_tx_compl_process(struct be_adapter *adapter, 1954static u16 be_tx_compl_process(struct be_adapter *adapter,
1918 struct be_tx_obj *txo, u16 last_index) 1955 struct be_tx_obj *txo, u16 last_index)
1919{ 1956{
1920 struct be_queue_info *txq = &txo->q; 1957 struct be_queue_info *txq = &txo->q;
1921 struct be_eth_wrb *wrb; 1958 struct be_eth_wrb *wrb;
@@ -2122,7 +2159,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2122 2159
2123 eq = &eqo->q; 2160 eq = &eqo->q;
2124 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, 2161 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2125 sizeof(struct be_eq_entry)); 2162 sizeof(struct be_eq_entry));
2126 if (rc) 2163 if (rc)
2127 return rc; 2164 return rc;
2128 2165
@@ -2155,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
2155 2192
2156 cq = &adapter->mcc_obj.cq; 2193 cq = &adapter->mcc_obj.cq;
2157 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, 2194 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2158 sizeof(struct be_mcc_compl))) 2195 sizeof(struct be_mcc_compl)))
2159 goto err; 2196 goto err;
2160 2197
2161 /* Use the default EQ for MCC completions */ 2198 /* Use the default EQ for MCC completions */
@@ -2275,7 +2312,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
2275 rxo->adapter = adapter; 2312 rxo->adapter = adapter;
2276 cq = &rxo->cq; 2313 cq = &rxo->cq;
2277 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 2314 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2278 sizeof(struct be_eth_rx_compl)); 2315 sizeof(struct be_eth_rx_compl));
2279 if (rc) 2316 if (rc)
2280 return rc; 2317 return rc;
2281 2318
@@ -2339,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
2339} 2376}
2340 2377
2341static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, 2378static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2342 int budget, int polling) 2379 int budget, int polling)
2343{ 2380{
2344 struct be_adapter *adapter = rxo->adapter; 2381 struct be_adapter *adapter = rxo->adapter;
2345 struct be_queue_info *rx_cq = &rxo->cq; 2382 struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2402,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2365 * promiscuous mode on some skews 2402 * promiscuous mode on some skews
2366 */ 2403 */
2367 if (unlikely(rxcp->port != adapter->port_num && 2404 if (unlikely(rxcp->port != adapter->port_num &&
2368 !lancer_chip(adapter))) { 2405 !lancer_chip(adapter))) {
2369 be_rx_compl_discard(rxo, rxcp); 2406 be_rx_compl_discard(rxo, rxcp);
2370 goto loop_continue; 2407 goto loop_continue;
2371 } 2408 }
@@ -2405,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2405 if (!txcp) 2442 if (!txcp)
2406 break; 2443 break;
2407 num_wrbs += be_tx_compl_process(adapter, txo, 2444 num_wrbs += be_tx_compl_process(adapter, txo,
2408 AMAP_GET_BITS(struct amap_eth_tx_compl, 2445 AMAP_GET_BITS(struct
2409 wrb_index, txcp)); 2446 amap_eth_tx_compl,
2447 wrb_index, txcp));
2410 } 2448 }
2411 2449
2412 if (work_done) { 2450 if (work_done) {
@@ -2416,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2416 /* As Tx wrbs have been freed up, wake up netdev queue 2454 /* As Tx wrbs have been freed up, wake up netdev queue
2417 * if it was stopped due to lack of tx wrbs. */ 2455 * if it was stopped due to lack of tx wrbs. */
2418 if (__netif_subqueue_stopped(adapter->netdev, idx) && 2456 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2419 atomic_read(&txo->q.used) < txo->q.len / 2) { 2457 atomic_read(&txo->q.used) < txo->q.len / 2) {
2420 netif_wake_subqueue(adapter->netdev, idx); 2458 netif_wake_subqueue(adapter->netdev, idx);
2421 } 2459 }
2422 2460
@@ -2510,9 +2548,9 @@ void be_detect_error(struct be_adapter *adapter)
2510 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 2548 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2511 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2549 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2512 sliport_err1 = ioread32(adapter->db + 2550 sliport_err1 = ioread32(adapter->db +
2513 SLIPORT_ERROR1_OFFSET); 2551 SLIPORT_ERROR1_OFFSET);
2514 sliport_err2 = ioread32(adapter->db + 2552 sliport_err2 = ioread32(adapter->db +
2515 SLIPORT_ERROR2_OFFSET); 2553 SLIPORT_ERROR2_OFFSET);
2516 adapter->hw_error = true; 2554 adapter->hw_error = true;
2517 /* Do not log error messages if its a FW reset */ 2555 /* Do not log error messages if its a FW reset */
2518 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && 2556 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2569,13 @@ void be_detect_error(struct be_adapter *adapter)
2531 } 2569 }
2532 } else { 2570 } else {
2533 pci_read_config_dword(adapter->pdev, 2571 pci_read_config_dword(adapter->pdev,
2534 PCICFG_UE_STATUS_LOW, &ue_lo); 2572 PCICFG_UE_STATUS_LOW, &ue_lo);
2535 pci_read_config_dword(adapter->pdev, 2573 pci_read_config_dword(adapter->pdev,
2536 PCICFG_UE_STATUS_HIGH, &ue_hi); 2574 PCICFG_UE_STATUS_HIGH, &ue_hi);
2537 pci_read_config_dword(adapter->pdev, 2575 pci_read_config_dword(adapter->pdev,
2538 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2576 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2539 pci_read_config_dword(adapter->pdev, 2577 pci_read_config_dword(adapter->pdev,
2540 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); 2578 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2541 2579
2542 ue_lo = (ue_lo & ~ue_lo_mask); 2580 ue_lo = (ue_lo & ~ue_lo_mask);
2543 ue_hi = (ue_hi & ~ue_hi_mask); 2581 ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2662,7 @@ fail:
2624} 2662}
2625 2663
2626static inline int be_msix_vec_get(struct be_adapter *adapter, 2664static inline int be_msix_vec_get(struct be_adapter *adapter,
2627 struct be_eq_obj *eqo) 2665 struct be_eq_obj *eqo)
2628{ 2666{
2629 return adapter->msix_entries[eqo->msix_idx].vector; 2667 return adapter->msix_entries[eqo->msix_idx].vector;
2630} 2668}
@@ -2648,7 +2686,7 @@ err_msix:
2648 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) 2686 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2649 free_irq(be_msix_vec_get(adapter, eqo), eqo); 2687 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2650 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", 2688 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2651 status); 2689 status);
2652 be_msix_disable(adapter); 2690 be_msix_disable(adapter);
2653 return status; 2691 return status;
2654} 2692}
@@ -2774,7 +2812,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2774{ 2812{
2775 struct be_rx_obj *rxo; 2813 struct be_rx_obj *rxo;
2776 int rc, i, j; 2814 int rc, i, j;
2777 u8 rsstable[128]; 2815 u8 rss_hkey[RSS_HASH_KEY_LEN];
2816 struct rss_info *rss = &adapter->rss_info;
2778 2817
2779 for_all_rx_queues(adapter, rxo, i) { 2818 for_all_rx_queues(adapter, rxo, i) {
2780 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, 2819 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2838,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2799 } 2838 }
2800 2839
2801 if (be_multi_rxq(adapter)) { 2840 if (be_multi_rxq(adapter)) {
2802 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { 2841 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2842 j += adapter->num_rx_qs - 1) {
2803 for_all_rss_queues(adapter, rxo, i) { 2843 for_all_rss_queues(adapter, rxo, i) {
2804 if ((j + i) >= 128) 2844 if ((j + i) >= RSS_INDIR_TABLE_LEN)
2805 break; 2845 break;
2806 rsstable[j + i] = rxo->rss_id; 2846 rss->rsstable[j + i] = rxo->rss_id;
2847 rss->rss_queue[j + i] = i;
2807 } 2848 }
2808 } 2849 }
2809 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | 2850 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2810 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6; 2851 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2811 2852
2812 if (!BEx_chip(adapter)) 2853 if (!BEx_chip(adapter))
2813 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | 2854 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2814 RSS_ENABLE_UDP_IPV6; 2855 RSS_ENABLE_UDP_IPV6;
2815 } else { 2856 } else {
2816 /* Disable RSS, if only default RX Q is created */ 2857 /* Disable RSS, if only default RX Q is created */
2817 adapter->rss_flags = RSS_ENABLE_NONE; 2858 rss->rss_flags = RSS_ENABLE_NONE;
2818 } 2859 }
2819 2860
2820 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 2861 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
2821 128); 2862 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
2863 128, rss_hkey);
2822 if (rc) { 2864 if (rc) {
2823 adapter->rss_flags = RSS_ENABLE_NONE; 2865 rss->rss_flags = RSS_ENABLE_NONE;
2824 return rc; 2866 return rc;
2825 } 2867 }
2826 2868
2869 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2870
2827 /* First time posting */ 2871 /* First time posting */
2828 for_all_rx_queues(adapter, rxo, i) 2872 for_all_rx_queues(adapter, rxo, i)
2829 be_post_rx_frags(rxo, GFP_KERNEL); 2873 be_post_rx_frags(rxo, GFP_KERNEL);
@@ -2896,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2896 2940
2897 if (enable) { 2941 if (enable) {
2898 status = pci_write_config_dword(adapter->pdev, 2942 status = pci_write_config_dword(adapter->pdev,
2899 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); 2943 PCICFG_PM_CONTROL_OFFSET,
2944 PCICFG_PM_CONTROL_MASK);
2900 if (status) { 2945 if (status) {
2901 dev_err(&adapter->pdev->dev, 2946 dev_err(&adapter->pdev->dev,
2902 "Could not enable Wake-on-lan\n"); 2947 "Could not enable Wake-on-lan\n");
@@ -2905,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2905 return status; 2950 return status;
2906 } 2951 }
2907 status = be_cmd_enable_magic_wol(adapter, 2952 status = be_cmd_enable_magic_wol(adapter,
2908 adapter->netdev->dev_addr, &cmd); 2953 adapter->netdev->dev_addr,
2954 &cmd);
2909 pci_enable_wake(adapter->pdev, PCI_D3hot, 1); 2955 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2910 pci_enable_wake(adapter->pdev, PCI_D3cold, 1); 2956 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2911 } else { 2957 } else {
@@ -2944,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
2944 2990
2945 if (status) 2991 if (status)
2946 dev_err(&adapter->pdev->dev, 2992 dev_err(&adapter->pdev->dev,
2947 "Mac address assignment failed for VF %d\n", vf); 2993 "Mac address assignment failed for VF %d\n",
2994 vf);
2948 else 2995 else
2949 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 2996 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2950 2997
@@ -3086,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3086 3133
3087 /* If a FW profile exists, then cap_flags are updated */ 3134 /* If a FW profile exists, then cap_flags are updated */
3088 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3135 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3089 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST); 3136 BE_IF_FLAGS_BROADCAST |
3090 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3137 BE_IF_FLAGS_MULTICAST);
3091 &vf_cfg->if_handle, vf + 1); 3138 status =
3139 be_cmd_if_create(adapter, cap_flags, en_flags,
3140 &vf_cfg->if_handle, vf + 1);
3092 if (status) 3141 if (status)
3093 goto err; 3142 goto err;
3094 } 3143 }
@@ -3119,7 +3168,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3119 struct be_vf_cfg *vf_cfg; 3168 struct be_vf_cfg *vf_cfg;
3120 int status, old_vfs, vf; 3169 int status, old_vfs, vf;
3121 u32 privileges; 3170 u32 privileges;
3122 u16 lnk_speed;
3123 3171
3124 old_vfs = pci_num_vf(adapter->pdev); 3172 old_vfs = pci_num_vf(adapter->pdev);
3125 if (old_vfs) { 3173 if (old_vfs) {
@@ -3175,16 +3223,9 @@ static int be_vf_setup(struct be_adapter *adapter)
3175 vf); 3223 vf);
3176 } 3224 }
3177 3225
3178 /* BE3 FW, by default, caps VF TX-rate to 100mbps. 3226 /* Allow full available bandwidth */
3179 * Allow full available bandwidth 3227 if (!old_vfs)
3180 */ 3228 be_cmd_config_qos(adapter, 0, 0, vf + 1);
3181 if (BE3_chip(adapter) && !old_vfs)
3182 be_cmd_config_qos(adapter, 1000, vf + 1);
3183
3184 status = be_cmd_link_status_query(adapter, &lnk_speed,
3185 NULL, vf + 1);
3186 if (!status)
3187 vf_cfg->tx_rate = lnk_speed;
3188 3229
3189 if (!old_vfs) { 3230 if (!old_vfs) {
3190 be_cmd_enable_vf(adapter, vf + 1); 3231 be_cmd_enable_vf(adapter, vf + 1);
@@ -3590,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)
3590} 3631}
3591#endif 3632#endif
3592 3633
3593#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 3634static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3594static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3595
3596static bool be_flash_redboot(struct be_adapter *adapter,
3597 const u8 *p, u32 img_start, int image_size,
3598 int hdr_size)
3599{
3600 u32 crc_offset;
3601 u8 flashed_crc[4];
3602 int status;
3603
3604 crc_offset = hdr_size + img_start + image_size - 4;
3605
3606 p += crc_offset;
3607
3608 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3609 (image_size - 4));
3610 if (status) {
3611 dev_err(&adapter->pdev->dev,
3612 "could not get crc from flash, not flashing redboot\n");
3613 return false;
3614 }
3615
3616 /*update redboot only if crc does not match*/
3617 if (!memcmp(flashed_crc, p, 4))
3618 return false;
3619 else
3620 return true;
3621}
3622 3635
3623static bool phy_flashing_required(struct be_adapter *adapter) 3636static bool phy_flashing_required(struct be_adapter *adapter)
3624{ 3637{
@@ -3649,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
3649} 3662}
3650 3663
3651static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, 3664static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3652 int header_size, 3665 int header_size,
3653 const struct firmware *fw) 3666 const struct firmware *fw)
3654{ 3667{
3655 struct flash_section_info *fsec = NULL; 3668 struct flash_section_info *fsec = NULL;
3656 const u8 *p = fw->data; 3669 const u8 *p = fw->data;
@@ -3665,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3665 return NULL; 3678 return NULL;
3666} 3679}
3667 3680
3681static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3682 u32 img_offset, u32 img_size, int hdr_size,
3683 u16 img_optype, bool *crc_match)
3684{
3685 u32 crc_offset;
3686 int status;
3687 u8 crc[4];
3688
3689 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3690 if (status)
3691 return status;
3692
3693 crc_offset = hdr_size + img_offset + img_size - 4;
3694
3695 /* Skip flashing, if crc of flashed region matches */
3696 if (!memcmp(crc, p + crc_offset, 4))
3697 *crc_match = true;
3698 else
3699 *crc_match = false;
3700
3701 return status;
3702}
3703
3668static int be_flash(struct be_adapter *adapter, const u8 *img, 3704static int be_flash(struct be_adapter *adapter, const u8 *img,
3669 struct be_dma_mem *flash_cmd, int optype, int img_size) 3705 struct be_dma_mem *flash_cmd, int optype, int img_size)
3670{ 3706{
3671 u32 total_bytes = 0, flash_op, num_bytes = 0;
3672 int status = 0;
3673 struct be_cmd_write_flashrom *req = flash_cmd->va; 3707 struct be_cmd_write_flashrom *req = flash_cmd->va;
3708 u32 total_bytes, flash_op, num_bytes;
3709 int status;
3674 3710
3675 total_bytes = img_size; 3711 total_bytes = img_size;
3676 while (total_bytes) { 3712 while (total_bytes) {
@@ -3693,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
3693 memcpy(req->data_buf, img, num_bytes); 3729 memcpy(req->data_buf, img, num_bytes);
3694 img += num_bytes; 3730 img += num_bytes;
3695 status = be_cmd_write_flashrom(adapter, flash_cmd, optype, 3731 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3696 flash_op, num_bytes); 3732 flash_op, num_bytes);
3697 if (status) { 3733 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
3698 if (status == ILLEGAL_IOCTL_REQ && 3734 optype == OPTYPE_PHY_FW)
3699 optype == OPTYPE_PHY_FW) 3735 break;
3700 break; 3736 else if (status)
3701 dev_err(&adapter->pdev->dev,
3702 "cmd to write to flash rom failed.\n");
3703 return status; 3737 return status;
3704 }
3705 } 3738 }
3706 return 0; 3739 return 0;
3707} 3740}
3708 3741
3709/* For BE2, BE3 and BE3-R */ 3742/* For BE2, BE3 and BE3-R */
3710static int be_flash_BEx(struct be_adapter *adapter, 3743static int be_flash_BEx(struct be_adapter *adapter,
3711 const struct firmware *fw, 3744 const struct firmware *fw,
3712 struct be_dma_mem *flash_cmd, 3745 struct be_dma_mem *flash_cmd, int num_of_images)
3713 int num_of_images)
3714
3715{ 3746{
3716 int status = 0, i, filehdr_size = 0;
3717 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); 3747 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3718 const u8 *p = fw->data; 3748 struct device *dev = &adapter->pdev->dev;
3719 const struct flash_comp *pflashcomp;
3720 int num_comp, redboot;
3721 struct flash_section_info *fsec = NULL; 3749 struct flash_section_info *fsec = NULL;
3750 int status, i, filehdr_size, num_comp;
3751 const struct flash_comp *pflashcomp;
3752 bool crc_match;
3753 const u8 *p;
3722 3754
3723 struct flash_comp gen3_flash_types[] = { 3755 struct flash_comp gen3_flash_types[] = {
3724 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, 3756 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
@@ -3775,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
3775 /* Get flash section info*/ 3807 /* Get flash section info*/
3776 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3808 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3777 if (!fsec) { 3809 if (!fsec) {
3778 dev_err(&adapter->pdev->dev, 3810 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3779 "Invalid Cookie. UFI corrupted ?\n");
3780 return -1; 3811 return -1;
3781 } 3812 }
3782 for (i = 0; i < num_comp; i++) { 3813 for (i = 0; i < num_comp; i++) {
@@ -3792,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,
3792 continue; 3823 continue;
3793 3824
3794 if (pflashcomp[i].optype == OPTYPE_REDBOOT) { 3825 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3795 redboot = be_flash_redboot(adapter, fw->data, 3826 status = be_check_flash_crc(adapter, fw->data,
3796 pflashcomp[i].offset, pflashcomp[i].size, 3827 pflashcomp[i].offset,
3797 filehdr_size + img_hdrs_size); 3828 pflashcomp[i].size,
3798 if (!redboot) 3829 filehdr_size +
3830 img_hdrs_size,
3831 OPTYPE_REDBOOT, &crc_match);
3832 if (status) {
3833 dev_err(dev,
3834 "Could not get CRC for 0x%x region\n",
3835 pflashcomp[i].optype);
3836 continue;
3837 }
3838
3839 if (crc_match)
3799 continue; 3840 continue;
3800 } 3841 }
3801 3842
3802 p = fw->data; 3843 p = fw->data + filehdr_size + pflashcomp[i].offset +
3803 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size; 3844 img_hdrs_size;
3804 if (p + pflashcomp[i].size > fw->data + fw->size) 3845 if (p + pflashcomp[i].size > fw->data + fw->size)
3805 return -1; 3846 return -1;
3806 3847
3807 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, 3848 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3808 pflashcomp[i].size); 3849 pflashcomp[i].size);
3809 if (status) { 3850 if (status) {
3810 dev_err(&adapter->pdev->dev, 3851 dev_err(dev, "Flashing section type 0x%x failed\n",
3811 "Flashing section type %d failed.\n",
3812 pflashcomp[i].img_type); 3852 pflashcomp[i].img_type);
3813 return status; 3853 return status;
3814 } 3854 }
@@ -3816,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,
3816 return 0; 3856 return 0;
3817} 3857}
3818 3858
3859static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3860{
3861 u32 img_type = le32_to_cpu(fsec_entry.type);
3862 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3863
3864 if (img_optype != 0xFFFF)
3865 return img_optype;
3866
3867 switch (img_type) {
3868 case IMAGE_FIRMWARE_iSCSI:
3869 img_optype = OPTYPE_ISCSI_ACTIVE;
3870 break;
3871 case IMAGE_BOOT_CODE:
3872 img_optype = OPTYPE_REDBOOT;
3873 break;
3874 case IMAGE_OPTION_ROM_ISCSI:
3875 img_optype = OPTYPE_BIOS;
3876 break;
3877 case IMAGE_OPTION_ROM_PXE:
3878 img_optype = OPTYPE_PXE_BIOS;
3879 break;
3880 case IMAGE_OPTION_ROM_FCoE:
3881 img_optype = OPTYPE_FCOE_BIOS;
3882 break;
3883 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3884 img_optype = OPTYPE_ISCSI_BACKUP;
3885 break;
3886 case IMAGE_NCSI:
3887 img_optype = OPTYPE_NCSI_FW;
3888 break;
3889 case IMAGE_FLASHISM_JUMPVECTOR:
3890 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3891 break;
3892 case IMAGE_FIRMWARE_PHY:
3893 img_optype = OPTYPE_SH_PHY_FW;
3894 break;
3895 case IMAGE_REDBOOT_DIR:
3896 img_optype = OPTYPE_REDBOOT_DIR;
3897 break;
3898 case IMAGE_REDBOOT_CONFIG:
3899 img_optype = OPTYPE_REDBOOT_CONFIG;
3900 break;
3901 case IMAGE_UFI_DIR:
3902 img_optype = OPTYPE_UFI_DIR;
3903 break;
3904 default:
3905 break;
3906 }
3907
3908 return img_optype;
3909}
3910
3819static int be_flash_skyhawk(struct be_adapter *adapter, 3911static int be_flash_skyhawk(struct be_adapter *adapter,
3820 const struct firmware *fw, 3912 const struct firmware *fw,
3821 struct be_dma_mem *flash_cmd, int num_of_images) 3913 struct be_dma_mem *flash_cmd, int num_of_images)
3822{ 3914{
3823 int status = 0, i, filehdr_size = 0;
3824 int img_offset, img_size, img_optype, redboot;
3825 int img_hdrs_size = num_of_images * sizeof(struct image_hdr); 3915 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3826 const u8 *p = fw->data; 3916 struct device *dev = &adapter->pdev->dev;
3827 struct flash_section_info *fsec = NULL; 3917 struct flash_section_info *fsec = NULL;
3918 u32 img_offset, img_size, img_type;
3919 int status, i, filehdr_size;
3920 bool crc_match, old_fw_img;
3921 u16 img_optype;
3922 const u8 *p;
3828 3923
3829 filehdr_size = sizeof(struct flash_file_hdr_g3); 3924 filehdr_size = sizeof(struct flash_file_hdr_g3);
3830 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3925 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3831 if (!fsec) { 3926 if (!fsec) {
3832 dev_err(&adapter->pdev->dev, 3927 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3833 "Invalid Cookie. UFI corrupted ?\n");
3834 return -1; 3928 return -1;
3835 } 3929 }
3836 3930
3837 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { 3931 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3838 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); 3932 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3839 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); 3933 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3934 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3935 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3936 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
3840 3937
3841 switch (le32_to_cpu(fsec->fsec_entry[i].type)) { 3938 if (img_optype == 0xFFFF)
3842 case IMAGE_FIRMWARE_iSCSI:
3843 img_optype = OPTYPE_ISCSI_ACTIVE;
3844 break;
3845 case IMAGE_BOOT_CODE:
3846 img_optype = OPTYPE_REDBOOT;
3847 break;
3848 case IMAGE_OPTION_ROM_ISCSI:
3849 img_optype = OPTYPE_BIOS;
3850 break;
3851 case IMAGE_OPTION_ROM_PXE:
3852 img_optype = OPTYPE_PXE_BIOS;
3853 break;
3854 case IMAGE_OPTION_ROM_FCoE:
3855 img_optype = OPTYPE_FCOE_BIOS;
3856 break;
3857 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3858 img_optype = OPTYPE_ISCSI_BACKUP;
3859 break;
3860 case IMAGE_NCSI:
3861 img_optype = OPTYPE_NCSI_FW;
3862 break;
3863 default:
3864 continue; 3939 continue;
3940 /* Don't bother verifying CRC if an old FW image is being
3941 * flashed
3942 */
3943 if (old_fw_img)
3944 goto flash;
3945
3946 status = be_check_flash_crc(adapter, fw->data, img_offset,
3947 img_size, filehdr_size +
3948 img_hdrs_size, img_optype,
3949 &crc_match);
3950 /* The current FW image on the card does not recognize the new
3951 * FLASH op_type. The FW download is partially complete.
3952 * Reboot the server now to enable FW image to recognize the
3953 * new FLASH op_type. To complete the remaining process,
3954 * download the same FW again after the reboot.
3955 */
3956 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
3957 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
3958 dev_err(dev, "Flash incomplete. Reset the server\n");
3959 dev_err(dev, "Download FW image again after reset\n");
3960 return -EAGAIN;
3961 } else if (status) {
3962 dev_err(dev, "Could not get CRC for 0x%x region\n",
3963 img_optype);
3964 return -EFAULT;
3865 } 3965 }
3866 3966
3867 if (img_optype == OPTYPE_REDBOOT) { 3967 if (crc_match)
3868 redboot = be_flash_redboot(adapter, fw->data, 3968 continue;
3869 img_offset, img_size,
3870 filehdr_size + img_hdrs_size);
3871 if (!redboot)
3872 continue;
3873 }
3874 3969
3875 p = fw->data; 3970flash:
3876 p += filehdr_size + img_offset + img_hdrs_size; 3971 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
3877 if (p + img_size > fw->data + fw->size) 3972 if (p + img_size > fw->data + fw->size)
3878 return -1; 3973 return -1;
3879 3974
3880 status = be_flash(adapter, p, flash_cmd, img_optype, img_size); 3975 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3881 if (status) { 3976 /* For old FW images ignore ILLEGAL_FIELD error or errors on
3882 dev_err(&adapter->pdev->dev, 3977 * UFI_DIR region
3883 "Flashing section type %d failed.\n", 3978 */
3884 fsec->fsec_entry[i].type); 3979 if (old_fw_img &&
3885 return status; 3980 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
3981 (img_optype == OPTYPE_UFI_DIR &&
3982 base_status(status) == MCC_STATUS_FAILED))) {
3983 continue;
3984 } else if (status) {
3985 dev_err(dev, "Flashing section type 0x%x failed\n",
3986 img_type);
3987 return -EFAULT;
3886 } 3988 }
3887 } 3989 }
3888 return 0; 3990 return 0;
3889} 3991}
3890 3992
3891static int lancer_fw_download(struct be_adapter *adapter, 3993static int lancer_fw_download(struct be_adapter *adapter,
3892 const struct firmware *fw) 3994 const struct firmware *fw)
3893{ 3995{
3894#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) 3996#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3895#define LANCER_FW_DOWNLOAD_LOCATION "/prg" 3997#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
3955 } 4057 }
3956 4058
3957 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 4059 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3958 flash_cmd.dma); 4060 flash_cmd.dma);
3959 if (status) { 4061 if (status) {
3960 dev_err(&adapter->pdev->dev, 4062 dev_err(&adapter->pdev->dev,
3961 "Firmware load error. " 4063 "Firmware load error. "
@@ -3976,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
3976 goto lancer_fw_exit; 4078 goto lancer_fw_exit;
3977 } 4079 }
3978 } else if (change_status != LANCER_NO_RESET_NEEDED) { 4080 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3979 dev_err(&adapter->pdev->dev, 4081 dev_err(&adapter->pdev->dev,
3980 "System reboot required for new FW" 4082 "System reboot required for new FW to be active\n");
3981 " to be active\n");
3982 } 4083 }
3983 4084
3984 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); 4085 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4042 switch (ufi_type) { 4143 switch (ufi_type) {
4043 case UFI_TYPE4: 4144 case UFI_TYPE4:
4044 status = be_flash_skyhawk(adapter, fw, 4145 status = be_flash_skyhawk(adapter, fw,
4045 &flash_cmd, num_imgs); 4146 &flash_cmd, num_imgs);
4046 break; 4147 break;
4047 case UFI_TYPE3R: 4148 case UFI_TYPE3R:
4048 status = be_flash_BEx(adapter, fw, &flash_cmd, 4149 status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4213,7 @@ fw_exit:
4112 return status; 4213 return status;
4113} 4214}
4114 4215
4115static int be_ndo_bridge_setlink(struct net_device *dev,
4116 struct nlmsghdr *nlh)
4216static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4117{ 4217{
4118 struct be_adapter *adapter = netdev_priv(dev); 4218 struct be_adapter *adapter = netdev_priv(dev);
4119 struct nlattr *attr, *br_spec; 4219 struct nlattr *attr, *br_spec;
@@ -4155,8 +4255,7 @@ err:
4155} 4255}
4156 4256
4157static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4257static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4158 struct net_device *dev,
4159 u32 filter_mask)
4258 struct net_device *dev, u32 filter_mask)
4160{ 4259{
4161 struct be_adapter *adapter = netdev_priv(dev); 4260 struct be_adapter *adapter = netdev_priv(dev);
4162 int status = 0; 4261 int status = 0;
@@ -4254,7 +4353,7 @@ static const struct net_device_ops be_netdev_ops = {
4254 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, 4353 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4255 .ndo_set_vf_mac = be_set_vf_mac, 4354 .ndo_set_vf_mac = be_set_vf_mac,
4256 .ndo_set_vf_vlan = be_set_vf_vlan, 4355 .ndo_set_vf_vlan = be_set_vf_vlan,
4257 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
4356 .ndo_set_vf_rate = be_set_vf_tx_rate,
4258 .ndo_get_vf_config = be_get_vf_config, 4357 .ndo_get_vf_config = be_get_vf_config,
4259 .ndo_set_vf_link_state = be_set_vf_link_state, 4358 .ndo_set_vf_link_state = be_set_vf_link_state,
4260#ifdef CONFIG_NET_POLL_CONTROLLER 4359#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4301,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)
4301 4400
4302 netdev->netdev_ops = &be_netdev_ops; 4401 netdev->netdev_ops = &be_netdev_ops;
4303 4402
4304 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4403 netdev->ethtool_ops = &be_ethtool_ops;
4305} 4404}
4306 4405
4307static void be_unmap_pci_bars(struct be_adapter *adapter) 4406static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4870,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)
4870} 4969}
4871 4970
4872static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, 4971static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4873 pci_channel_state_t state) 4972 pci_channel_state_t state)
4874{ 4973{
4875 struct be_adapter *adapter = pci_get_drvdata(pdev); 4974 struct be_adapter *adapter = pci_get_drvdata(pdev);
4876 struct net_device *netdev = adapter->netdev; 4975 struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7e342b..f3658bdb64cc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
769 return phy_mii_ioctl(phy, ifr, cmd); 769 return phy_mii_ioctl(phy, ifr, cmd);
770} 770}
771 771
772static int ethoc_config(struct net_device *dev, struct ifmap *map)
773{
774 return -ENOSYS;
775}
776
777static void ethoc_do_set_mac_address(struct net_device *dev) 772static void ethoc_do_set_mac_address(struct net_device *dev)
778{ 773{
779 struct ethoc *priv = netdev_priv(dev); 774 struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
995 .ndo_open = ethoc_open, 990 .ndo_open = ethoc_open,
996 .ndo_stop = ethoc_stop, 991 .ndo_stop = ethoc_stop,
997 .ndo_do_ioctl = ethoc_ioctl, 992 .ndo_do_ioctl = ethoc_ioctl,
998 .ndo_set_config = ethoc_config,
999 .ndo_set_mac_address = ethoc_set_mac_address, 993 .ndo_set_mac_address = ethoc_set_mac_address,
1000 .ndo_set_rx_mode = ethoc_set_multicast_list, 994 .ndo_set_rx_mode = ethoc_set_multicast_list,
1001 .ndo_change_mtu = ethoc_change_mtu, 995 .ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 68069eabc4f8..c77fa4a69844 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
1210 1210
1211 SET_NETDEV_DEV(netdev, &pdev->dev); 1211 SET_NETDEV_DEV(netdev, &pdev->dev);
1212 1212
1213 SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
1213 netdev->ethtool_ops = &ftgmac100_ethtool_ops;
1214 netdev->netdev_ops = &ftgmac100_netdev_ops; 1214 netdev->netdev_ops = &ftgmac100_netdev_ops;
1215 netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; 1215 netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
1216 1216
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8be5b40c0a12..4ff1adc6bfca 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
1085 } 1085 }
1086 1086
1087 SET_NETDEV_DEV(netdev, &pdev->dev); 1087 SET_NETDEV_DEV(netdev, &pdev->dev);
1088 SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
1088 netdev->ethtool_ops = &ftmac100_ethtool_ops;
1089 netdev->netdev_ops = &ftmac100_netdev_ops; 1089 netdev->netdev_ops = &ftmac100_netdev_ops;
1090 1090
1091 platform_set_drvdata(pdev, netdev); 1091 platform_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6048dc8604ee..270308315d43 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -67,6 +67,7 @@ config FSL_XGMAC_MDIO
67 tristate "Freescale XGMAC MDIO" 67 tristate "Freescale XGMAC MDIO"
68 depends on FSL_SOC 68 depends on FSL_SOC
69 select PHYLIB 69 select PHYLIB
70 select OF_MDIO
70 ---help--- 71 ---help---
71 This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 72 This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
72 73
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 3b8d6d19ff05..671d080105a7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -221,7 +221,7 @@ struct bufdesc_ex {
221#define BD_ENET_TX_RCMASK ((ushort)0x003c) 221#define BD_ENET_TX_RCMASK ((ushort)0x003c)
222#define BD_ENET_TX_UN ((ushort)0x0002) 222#define BD_ENET_TX_UN ((ushort)0x0002)
223#define BD_ENET_TX_CSL ((ushort)0x0001) 223#define BD_ENET_TX_CSL ((ushort)0x0001)
224#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
224#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
225 225
226/*enhanced buffer descriptor control/status used by Ethernet transmit*/ 226/*enhanced buffer descriptor control/status used by Ethernet transmit*/
227#define BD_ENET_TX_INT 0x40000000 227#define BD_ENET_TX_INT 0x40000000
@@ -246,8 +246,8 @@ struct bufdesc_ex {
246#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) 246#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
247#define FEC_ENET_TX_FRSIZE 2048 247#define FEC_ENET_TX_FRSIZE 2048
248#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) 248#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
249#define TX_RING_SIZE 16 /* Must be power of two */
250#define TX_RING_MOD_MASK 15 /* for this to work */
249#define TX_RING_SIZE 512 /* Must be power of two */
250#define TX_RING_MOD_MASK 511 /* for this to work */
251 251
252#define BD_ENET_RX_INT 0x00800000 252#define BD_ENET_RX_INT 0x00800000
253#define BD_ENET_RX_PTP ((ushort)0x0400) 253#define BD_ENET_RX_PTP ((ushort)0x0400)
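
The TX_RING_SIZE/TX_RING_MOD_MASK pair above only works because the size is a power of two: index wrap-around then reduces to a bitwise AND. A minimal self-check of that arithmetic:

#include <assert.h>

#define TX_RING_SIZE 512                        /* must be a power of two */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)     /* 511, usable as an AND mask */

int main(void)
{
        /* the AND replaces a modulo only because 512 is a power of two */
        assert(((511 + 1) & TX_RING_MOD_MASK) == 0);
        assert(((37 + 1) & TX_RING_MOD_MASK) == 38);
        return 0;
}
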
@@ -296,8 +296,15 @@ struct fec_enet_private {
296 /* The ring entries to be free()ed */ 296 /* The ring entries to be free()ed */
297 struct bufdesc *dirty_tx; 297 struct bufdesc *dirty_tx;
298 298
299 unsigned short bufdesc_size;
299 unsigned short tx_ring_size; 300 unsigned short tx_ring_size;
300 unsigned short rx_ring_size; 301 unsigned short rx_ring_size;
302 unsigned short tx_stop_threshold;
303 unsigned short tx_wake_threshold;
304
305 /* Software TSO */
306 char *tso_hdrs;
307 dma_addr_t tso_hdrs_dma;
301 308
302 struct platform_device *pdev; 309 struct platform_device *pdev;
303 310
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8d69e439f0c5..38d9d276ab8b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -36,6 +36,7 @@
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/ip.h> 37#include <linux/ip.h>
38#include <net/ip.h> 38#include <net/ip.h>
39#include <net/tso.h>
39#include <linux/tcp.h> 40#include <linux/tcp.h>
40#include <linux/udp.h> 41#include <linux/udp.h>
41#include <linux/icmp.h> 42#include <linux/icmp.h>
@@ -54,6 +55,7 @@
54#include <linux/of_net.h> 55#include <linux/of_net.h>
55#include <linux/regulator/consumer.h> 56#include <linux/regulator/consumer.h>
56#include <linux/if_vlan.h> 57#include <linux/if_vlan.h>
58#include <linux/pinctrl/consumer.h>
57 59
58#include <asm/cacheflush.h> 60#include <asm/cacheflush.h>
59 61
@@ -172,10 +174,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
172#endif 174#endif
173#endif /* CONFIG_M5272 */ 175#endif /* CONFIG_M5272 */
174 176
175#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
176#error "FEC: descriptor ring size constants too large"
177#endif
178
179/* Interrupt events/masks. */ 177/* Interrupt events/masks. */
180#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ 178#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
181#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ 179#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
@@ -231,6 +229,15 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
231#define FEC_PAUSE_FLAG_AUTONEG 0x1 229#define FEC_PAUSE_FLAG_AUTONEG 0x1
232#define FEC_PAUSE_FLAG_ENABLE 0x2 230#define FEC_PAUSE_FLAG_ENABLE 0x2
233 231
232#define TSO_HEADER_SIZE 128
233/* Max number of allowed TCP segments for software TSO */
234#define FEC_MAX_TSO_SEGS 100
235#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
236
237#define IS_TSO_HEADER(txq, addr) \
238 ((addr >= txq->tso_hdrs_dma) && \
239 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
240
234static int mii_cnt; 241static int mii_cnt;
235 242
236static inline 243static inline
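
For the IS_TSO_HEADER() macro added above: it is a plain address-range test that tells the completion path a descriptor's buffer came from the coherent TSO header pool (one 128-byte slot per descriptor) and so must not be DMA-unmapped. A standalone sketch of the same check, with illustrative types and names:

#include <stdbool.h>
#include <stdint.h>

#define TSO_HEADER_SIZE 128

/* Illustrative stand-in for the driver's private state: one header
 * slot per TX descriptor, in a single coherent allocation. */
struct txq {
        uint64_t tso_hdrs_dma;          /* DMA base of the header pool */
        unsigned int tx_ring_size;
};

/* True when addr lies inside the header pool rather than in skb data,
 * mirroring the IS_TSO_HEADER() range test above. */
static bool is_tso_header(const struct txq *q, uint64_t addr)
{
        return addr >= q->tso_hdrs_dma &&
               addr < q->tso_hdrs_dma +
                      (uint64_t)q->tx_ring_size * TSO_HEADER_SIZE;
}

int main(void)
{
        struct txq q = { .tso_hdrs_dma = 0x1000, .tx_ring_size = 512 };

        return is_tso_header(&q, 0x1000 + 3 * TSO_HEADER_SIZE) ? 0 : 1;
}
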
@@ -286,6 +293,22 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_priva
286 return (new_bd < base) ? (new_bd + ring_size) : new_bd; 293 return (new_bd < base) ? (new_bd + ring_size) : new_bd;
287} 294}
288 295
296static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
297 struct fec_enet_private *fep)
298{
299 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
300}
301
302static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
303{
304 int entries;
305
306 entries = ((const char *)fep->dirty_tx -
307 (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
308
309 return entries > 0 ? entries : entries + fep->tx_ring_size;
310}
311
289static void *swap_buffer(void *bufaddr, int len) 312static void *swap_buffer(void *bufaddr, int len)
290{ 313{
291 int i; 314 int i;
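
fec_enet_get_free_txdesc_num() above is standard circular-ring arithmetic: the free count is dirty_tx - cur_tx - 1, wrapped by the ring size so one slot always stays empty and full and empty states stay distinguishable. A small worked example (values illustrative):

#include <stdio.h>

/* Free entries between consumer (dirty) and producer (cur), minus the
 * one slot deliberately kept empty. */
static int free_txdesc(int dirty, int cur, int ring_size)
{
        int entries = dirty - cur - 1;

        return entries > 0 ? entries : entries + ring_size;
}

int main(void)
{
        /* ring of 512: producer at 10, consumer at 5 -> 506 free */
        printf("%d\n", free_txdesc(5, 10, 512));        /* 506 */
        printf("%d\n", free_txdesc(10, 5, 512));        /* 4 */
        return 0;
}
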
@@ -307,33 +330,133 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
307 if (unlikely(skb_cow_head(skb, 0))) 330 if (unlikely(skb_cow_head(skb, 0)))
308 return -1; 331 return -1;
309 332
333 ip_hdr(skb)->check = 0;
310 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 334 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
311 335
312 return 0; 336 return 0;
313} 337}
314 338
315static netdev_tx_t
316fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
339static void
340fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
341{
342 const struct platform_device_id *id_entry =
343 platform_get_device_id(fep->pdev);
344 struct bufdesc *bdp_pre;
345
346 bdp_pre = fec_enet_get_prevdesc(bdp, fep);
347 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
348 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
349 fep->delay_work.trig_tx = true;
350 schedule_delayed_work(&(fep->delay_work.delay_work),
351 msecs_to_jiffies(1));
352 }
353}
354
355static int
356fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
317{ 357{
318 struct fec_enet_private *fep = netdev_priv(ndev); 358 struct fec_enet_private *fep = netdev_priv(ndev);
319 const struct platform_device_id *id_entry = 359 const struct platform_device_id *id_entry =
320 platform_get_device_id(fep->pdev); 360 platform_get_device_id(fep->pdev);
321 struct bufdesc *bdp, *bdp_pre;
322 void *bufaddr;
323 unsigned short status;
361 struct bufdesc *bdp = fep->cur_tx;
362 struct bufdesc_ex *ebdp;
363 int nr_frags = skb_shinfo(skb)->nr_frags;
364 int frag, frag_len;
365 unsigned short status;
366 unsigned int estatus = 0;
367 skb_frag_t *this_frag;
324 unsigned int index; 368 unsigned int index;
369 void *bufaddr;
370 int i;
325 371
326 /* Fill in a Tx ring entry */
372 for (frag = 0; frag < nr_frags; frag++) {
373 this_frag = &skb_shinfo(skb)->frags[frag];
374 bdp = fec_enet_get_nextdesc(bdp, fep);
375 ebdp = (struct bufdesc_ex *)bdp;
376
377 status = bdp->cbd_sc;
378 status &= ~BD_ENET_TX_STATS;
379 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
380 frag_len = skb_shinfo(skb)->frags[frag].size;
381
382 /* Handle the last BD specially */
383 if (frag == nr_frags - 1) {
384 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
385 if (fep->bufdesc_ex) {
386 estatus |= BD_ENET_TX_INT;
387 if (unlikely(skb_shinfo(skb)->tx_flags &
388 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
389 estatus |= BD_ENET_TX_TS;
390 }
391 }
392
393 if (fep->bufdesc_ex) {
394 if (skb->ip_summed == CHECKSUM_PARTIAL)
395 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
396 ebdp->cbd_bdu = 0;
397 ebdp->cbd_esc = estatus;
398 }
399
400 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
401
402 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
403 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
404 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
405 memcpy(fep->tx_bounce[index], bufaddr, frag_len);
406 bufaddr = fep->tx_bounce[index];
407
408 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
409 swap_buffer(bufaddr, frag_len);
410 }
411
412 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
413 frag_len, DMA_TO_DEVICE);
414 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
415 dev_kfree_skb_any(skb);
416 if (net_ratelimit())
417 netdev_err(ndev, "Tx DMA memory map failed\n");
418 goto dma_mapping_error;
419 }
420
421 bdp->cbd_datlen = frag_len;
422 bdp->cbd_sc = status;
423 }
424
425 fep->cur_tx = bdp;
426
427 return 0;
428
429dma_mapping_error:
327 bdp = fep->cur_tx; 430 bdp = fep->cur_tx;
431 for (i = 0; i < frag; i++) {
432 bdp = fec_enet_get_nextdesc(bdp, fep);
433 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
434 bdp->cbd_datlen, DMA_TO_DEVICE);
435 }
436 return NETDEV_TX_OK;
437}
328 438
329 status = bdp->cbd_sc;
439static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
440{
441 struct fec_enet_private *fep = netdev_priv(ndev);
442 const struct platform_device_id *id_entry =
443 platform_get_device_id(fep->pdev);
444 int nr_frags = skb_shinfo(skb)->nr_frags;
445 struct bufdesc *bdp, *last_bdp;
446 void *bufaddr;
447 unsigned short status;
448 unsigned short buflen;
449 unsigned int estatus = 0;
450 unsigned int index;
451 int entries_free;
452 int ret;
330 453
331 if (status & BD_ENET_TX_READY) {
332 /* Ooops. All transmit buffers are full. Bail out.
333 * This should not happen, since ndev->tbusy should be set.
334 */
335 netdev_err(ndev, "tx queue full!\n");
336 return NETDEV_TX_BUSY;
454 entries_free = fec_enet_get_free_txdesc_num(fep);
455 if (entries_free < MAX_SKB_FRAGS + 1) {
456 dev_kfree_skb_any(skb);
457 if (net_ratelimit())
458 netdev_err(ndev, "NOT enough BD for SG!\n");
459 return NETDEV_TX_OK;
337 } 460 }
338 461
339 /* Protocol checksum off-load for TCP and UDP. */ 462 /* Protocol checksum off-load for TCP and UDP. */
@@ -342,102 +465,300 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
342 return NETDEV_TX_OK; 465 return NETDEV_TX_OK;
343 } 466 }
344 467
345 /* Clear all of the status flags */
468 /* Fill in a Tx ring entry */
469 bdp = fep->cur_tx;
470 status = bdp->cbd_sc;
346 status &= ~BD_ENET_TX_STATS; 471 status &= ~BD_ENET_TX_STATS;
347 472
348 /* Set buffer length and buffer pointer */ 473 /* Set buffer length and buffer pointer */
349 bufaddr = skb->data; 474 bufaddr = skb->data;
350 bdp->cbd_datlen = skb->len;
475 buflen = skb_headlen(skb);
351
352 /*
353 * On some FEC implementations data must be aligned on
354 * 4-byte boundaries. Use bounce buffers to copy data
355 * and get it aligned. Ugh.
356 */
357 if (fep->bufdesc_ex)
358 index = (struct bufdesc_ex *)bdp -
359 (struct bufdesc_ex *)fep->tx_bd_base;
360 else
361 index = bdp - fep->tx_bd_base;
362 476
363 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
364 memcpy(fep->tx_bounce[index], skb->data, skb->len);
477 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
478 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
479 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
480 memcpy(fep->tx_bounce[index], skb->data, buflen);
365 bufaddr = fep->tx_bounce[index]; 481 bufaddr = fep->tx_bounce[index];
366 }
367 482
368 /*
369 * Some design made an incorrect assumption on endian mode of
370 * the system that it's running on. As the result, driver has to
371 * swap every frame going to and coming from the controller.
372 */
373 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
374 swap_buffer(bufaddr, skb->len);
483 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
484 swap_buffer(bufaddr, buflen);
485 }
375
376 /* Save skb pointer */
377 fep->tx_skbuff[index] = skb;
378 486
379 /* Push the data cache so the CPM does not get stale memory 487 /* Push the data cache so the CPM does not get stale memory
380 * data. 488 * data.
381 */ 489 */
382 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, 490 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
383 skb->len, DMA_TO_DEVICE);
491 buflen, DMA_TO_DEVICE);
384 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { 492 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
385 bdp->cbd_bufaddr = 0;
386 fep->tx_skbuff[index] = NULL;
387 dev_kfree_skb_any(skb); 493 dev_kfree_skb_any(skb);
388 if (net_ratelimit()) 494 if (net_ratelimit())
389 netdev_err(ndev, "Tx DMA memory map failed\n"); 495 netdev_err(ndev, "Tx DMA memory map failed\n");
390 return NETDEV_TX_OK; 496 return NETDEV_TX_OK;
391 } 497 }
392 498
499 if (nr_frags) {
500 ret = fec_enet_txq_submit_frag_skb(skb, ndev);
501 if (ret)
502 return ret;
503 } else {
504 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
505 if (fep->bufdesc_ex) {
506 estatus = BD_ENET_TX_INT;
507 if (unlikely(skb_shinfo(skb)->tx_flags &
508 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
509 estatus |= BD_ENET_TX_TS;
510 }
511 }
512
393 if (fep->bufdesc_ex) { 513 if (fep->bufdesc_ex) {
394 514
395 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 515 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
396 ebdp->cbd_bdu = 0;
397 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
398 fep->hwts_tx_en)) {
399 ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
400 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
401 } else {
402 ebdp->cbd_esc = BD_ENET_TX_INT;
403
404 /* Enable protocol checksum flags
405 * We do not bother with the IP Checksum bits as they
406 * are done by the kernel
407 */
408 if (skb->ip_summed == CHECKSUM_PARTIAL)
409 ebdp->cbd_esc |= BD_ENET_TX_PINS;
410 }
516
517 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
518 fep->hwts_tx_en))
519 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
520
521 if (skb->ip_summed == CHECKSUM_PARTIAL)
522 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
523
524 ebdp->cbd_bdu = 0;
525 ebdp->cbd_esc = estatus;
411 } 526 }
412 527
528 last_bdp = fep->cur_tx;
529 index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
530 /* Save skb pointer */
531 fep->tx_skbuff[index] = skb;
532
533 bdp->cbd_datlen = buflen;
534
413 /* Send it on its way. Tell FEC it's ready, interrupt when done, 535 /* Send it on its way. Tell FEC it's ready, interrupt when done,
414 * it's the last BD of the frame, and to put the CRC on the end. 536 * it's the last BD of the frame, and to put the CRC on the end.
415 */ 537 */
416 status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
417 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
538 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
418 bdp->cbd_sc = status; 539 bdp->cbd_sc = status;
419 540
420 bdp_pre = fec_enet_get_prevdesc(bdp, fep);
541 fec_enet_submit_work(bdp, fep);
421 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
422 !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
423 fep->delay_work.trig_tx = true;
424 schedule_delayed_work(&(fep->delay_work.delay_work),
425 msecs_to_jiffies(1));
426 }
427 542
428 /* If this was the last BD in the ring, start at the beginning again. */ 543 /* If this was the last BD in the ring, start at the beginning again. */
429 bdp = fec_enet_get_nextdesc(bdp, fep);
544 bdp = fec_enet_get_nextdesc(last_bdp, fep);
430 545
431 skb_tx_timestamp(skb); 546 skb_tx_timestamp(skb);
432 547
433 fep->cur_tx = bdp; 548 fep->cur_tx = bdp;
434 549
435 if (fep->cur_tx == fep->dirty_tx)
436 netif_stop_queue(ndev);
550 /* Trigger transmission start */
551 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
552
553 return 0;
554}
555
556static int
557fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
558 struct bufdesc *bdp, int index, char *data,
559 int size, bool last_tcp, bool is_last)
560{
561 struct fec_enet_private *fep = netdev_priv(ndev);
562 const struct platform_device_id *id_entry =
563 platform_get_device_id(fep->pdev);
564 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
565 unsigned short status;
566 unsigned int estatus = 0;
567
568 status = bdp->cbd_sc;
569 status &= ~BD_ENET_TX_STATS;
570
571 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
572 bdp->cbd_datlen = size;
573
574 if (((unsigned long) data) & FEC_ALIGNMENT ||
575 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
576 memcpy(fep->tx_bounce[index], data, size);
577 data = fep->tx_bounce[index];
578
579 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
580 swap_buffer(data, size);
581 }
582
583 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
584 size, DMA_TO_DEVICE);
585 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
586 dev_kfree_skb_any(skb);
587 if (net_ratelimit())
588 netdev_err(ndev, "Tx DMA memory map failed\n");
589 return NETDEV_TX_BUSY;
590 }
591
592 if (fep->bufdesc_ex) {
593 if (skb->ip_summed == CHECKSUM_PARTIAL)
594 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
595 ebdp->cbd_bdu = 0;
596 ebdp->cbd_esc = estatus;
597 }
598
599 /* Handle the last BD specially */
600 if (last_tcp)
601 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
602 if (is_last) {
603 status |= BD_ENET_TX_INTR;
604 if (fep->bufdesc_ex)
605 ebdp->cbd_esc |= BD_ENET_TX_INT;
606 }
607
608 bdp->cbd_sc = status;
609
610 return 0;
611}
612
613static int
614fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
615 struct bufdesc *bdp, int index)
616{
617 struct fec_enet_private *fep = netdev_priv(ndev);
618 const struct platform_device_id *id_entry =
619 platform_get_device_id(fep->pdev);
620 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
621 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
622 void *bufaddr;
623 unsigned long dmabuf;
624 unsigned short status;
625 unsigned int estatus = 0;
626
627 status = bdp->cbd_sc;
628 status &= ~BD_ENET_TX_STATS;
629 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
630
631 bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
632 dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
633 if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
634 id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
635 memcpy(fep->tx_bounce[index], skb->data, hdr_len);
636 bufaddr = fep->tx_bounce[index];
637
638 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
639 swap_buffer(bufaddr, hdr_len);
640
641 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
642 hdr_len, DMA_TO_DEVICE);
643 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
644 dev_kfree_skb_any(skb);
645 if (net_ratelimit())
646 netdev_err(ndev, "Tx DMA memory map failed\n");
647 return NETDEV_TX_BUSY;
648 }
649 }
650
651 bdp->cbd_bufaddr = dmabuf;
652 bdp->cbd_datlen = hdr_len;
653
654 if (fep->bufdesc_ex) {
655 if (skb->ip_summed == CHECKSUM_PARTIAL)
656 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
657 ebdp->cbd_bdu = 0;
658 ebdp->cbd_esc = estatus;
659 }
660
661 bdp->cbd_sc = status;
662
663 return 0;
664}
665
666static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
667{
668 struct fec_enet_private *fep = netdev_priv(ndev);
669 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
670 int total_len, data_left;
671 struct bufdesc *bdp = fep->cur_tx;
672 struct tso_t tso;
673 unsigned int index = 0;
674 int ret;
675
676 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
677 dev_kfree_skb_any(skb);
678 if (net_ratelimit())
679 netdev_err(ndev, "NOT enough BD for TSO!\n");
680 return NETDEV_TX_OK;
681 }
682
683 /* Protocol checksum off-load for TCP and UDP. */
684 if (fec_enet_clear_csum(skb, ndev)) {
685 dev_kfree_skb_any(skb);
686 return NETDEV_TX_OK;
687 }
688
689 /* Initialize the TSO handler, and prepare the first payload */
690 tso_start(skb, &tso);
691
692 total_len = skb->len - hdr_len;
693 while (total_len > 0) {
694 char *hdr;
695
696 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
697 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
698 total_len -= data_left;
699
700 /* prepare packet headers: MAC + IP + TCP */
701 hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
702 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
703 ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
704 if (ret)
705 goto err_release;
706
707 while (data_left > 0) {
708 int size;
709
710 size = min_t(int, tso.size, data_left);
711 bdp = fec_enet_get_nextdesc(bdp, fep);
712 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
713 ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
714 size, size == data_left,
715 total_len == 0);
716 if (ret)
717 goto err_release;
718
719 data_left -= size;
720 tso_build_data(skb, &tso, size);
721 }
722
723 bdp = fec_enet_get_nextdesc(bdp, fep);
724 }
725
726 /* Save skb pointer */
727 fep->tx_skbuff[index] = skb;
728
729 fec_enet_submit_work(bdp, fep);
730
731 skb_tx_timestamp(skb);
732 fep->cur_tx = bdp;
437 733
438 /* Trigger transmission start */ 734 /* Trigger transmission start */
439 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 735 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
440 736
737 return 0;
738
739err_release:
740 /* TODO: Release all used data descriptors for TSO */
741 return ret;
742}
743
744static netdev_tx_t
745fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
746{
747 struct fec_enet_private *fep = netdev_priv(ndev);
748 int entries_free;
749 int ret;
750
751 if (skb_is_gso(skb))
752 ret = fec_enet_txq_submit_tso(skb, ndev);
753 else
754 ret = fec_enet_txq_submit_skb(skb, ndev);
755 if (ret)
756 return ret;
757
758 entries_free = fec_enet_get_free_txdesc_num(fep);
759 if (entries_free <= fep->tx_stop_threshold)
760 netif_stop_queue(ndev);
761
441 return NETDEV_TX_OK; 762 return NETDEV_TX_OK;
442} 763}
443 764
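
The fec_enet_txq_submit_tso() path above follows the generic net/tso.h pattern: per segment, one header is rebuilt from the template with tso_build_hdr(), then up to gso_size bytes of payload are attached across data descriptors. A user-space sketch of just that segmentation arithmetic (no kernel helpers; names illustrative):

#include <stdio.h>

/* Walk a payload the way the TSO loop above does: each segment gets a
 * rebuilt header plus up to gso_size bytes of data. */
static void segment(int payload_len, int gso_size)
{
        int total = payload_len;

        while (total > 0) {
                int data_left = total < gso_size ? total : gso_size;

                total -= data_left;
                printf("hdr desc + %d data bytes%s\n",
                       data_left, total == 0 ? " (last)" : "");
        }
}

int main(void)
{
        segment(4000, 1448);    /* 3 segments: 1448 + 1448 + 1104 */
        return 0;
}
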
@@ -756,6 +1077,7 @@ fec_enet_tx(struct net_device *ndev)
756 unsigned short status; 1077 unsigned short status;
757 struct sk_buff *skb; 1078 struct sk_buff *skb;
758 int index = 0; 1079 int index = 0;
1080 int entries_free;
759 1081
760 fep = netdev_priv(ndev); 1082 fep = netdev_priv(ndev);
761 bdp = fep->dirty_tx; 1083 bdp = fep->dirty_tx;
@@ -769,16 +1091,17 @@ fec_enet_tx(struct net_device *ndev)
769 if (bdp == fep->cur_tx) 1091 if (bdp == fep->cur_tx)
770 break; 1092 break;
771 1093
772 if (fep->bufdesc_ex) 1094 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
773 index = (struct bufdesc_ex *)bdp -
774 (struct bufdesc_ex *)fep->tx_bd_base;
775 else
776 index = bdp - fep->tx_bd_base;
777 1095
778 skb = fep->tx_skbuff[index]; 1096 skb = fep->tx_skbuff[index];
779 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
780 DMA_TO_DEVICE);
1097 if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
1098 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1099 bdp->cbd_datlen, DMA_TO_DEVICE);
781 bdp->cbd_bufaddr = 0; 1100 bdp->cbd_bufaddr = 0;
1101 if (!skb) {
1102 bdp = fec_enet_get_nextdesc(bdp, fep);
1103 continue;
1104 }
782 1105
783 /* Check for errors. */ 1106 /* Check for errors. */
784 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1107 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -797,7 +1120,7 @@ fec_enet_tx(struct net_device *ndev)
797 ndev->stats.tx_carrier_errors++; 1120 ndev->stats.tx_carrier_errors++;
798 } else { 1121 } else {
799 ndev->stats.tx_packets++; 1122 ndev->stats.tx_packets++;
800 ndev->stats.tx_bytes += bdp->cbd_datlen;
1123 ndev->stats.tx_bytes += skb->len;
801 } 1124 }
802 1125
803 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 1126 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -834,15 +1157,15 @@ fec_enet_tx(struct net_device *ndev)
834 1157
835 /* Since we have freed up a buffer, the ring is no longer full 1158 /* Since we have freed up a buffer, the ring is no longer full
836 */ 1159 */
837 if (fep->dirty_tx != fep->cur_tx) {
838 if (netif_queue_stopped(ndev))
1160 if (netif_queue_stopped(ndev)) {
1161 entries_free = fec_enet_get_free_txdesc_num(fep);
1162 if (entries_free >= fep->tx_wake_threshold)
839 netif_wake_queue(ndev); 1163 netif_wake_queue(ndev);
840 } 1164 }
841 } 1165 }
842 return; 1166 return;
843} 1167}
844 1168
845
846/* During a receive, the cur_rx points to the current incoming buffer. 1169/* During a receive, the cur_rx points to the current incoming buffer.
847 * When we update through the ring, if the next incoming buffer has 1170 * When we update through the ring, if the next incoming buffer has
848 * not been given to the system, we just set the empty indicator, 1171 * not been given to the system, we just set the empty indicator,
@@ -920,11 +1243,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
920 pkt_len = bdp->cbd_datlen; 1243 pkt_len = bdp->cbd_datlen;
921 ndev->stats.rx_bytes += pkt_len; 1244 ndev->stats.rx_bytes += pkt_len;
922 1245
923 if (fep->bufdesc_ex)
1246 index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
924 index = (struct bufdesc_ex *)bdp -
925 (struct bufdesc_ex *)fep->rx_bd_base;
926 else
927 index = bdp - fep->rx_bd_base;
928 data = fep->rx_skbuff[index]->data; 1247 data = fep->rx_skbuff[index]->data;
929 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 1248 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
930 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1249 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1255,6 +1574,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1255 return 0; 1574 return 0;
1256} 1575}
1257 1576
1577static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1578{
1579 struct fec_enet_private *fep = netdev_priv(ndev);
1580 int ret;
1581
1582 if (enable) {
1583 ret = clk_prepare_enable(fep->clk_ahb);
1584 if (ret)
1585 return ret;
1586 ret = clk_prepare_enable(fep->clk_ipg);
1587 if (ret)
1588 goto failed_clk_ipg;
1589 if (fep->clk_enet_out) {
1590 ret = clk_prepare_enable(fep->clk_enet_out);
1591 if (ret)
1592 goto failed_clk_enet_out;
1593 }
1594 if (fep->clk_ptp) {
1595 ret = clk_prepare_enable(fep->clk_ptp);
1596 if (ret)
1597 goto failed_clk_ptp;
1598 }
1599 } else {
1600 clk_disable_unprepare(fep->clk_ahb);
1601 clk_disable_unprepare(fep->clk_ipg);
1602 if (fep->clk_enet_out)
1603 clk_disable_unprepare(fep->clk_enet_out);
1604 if (fep->clk_ptp)
1605 clk_disable_unprepare(fep->clk_ptp);
1606 }
1607
1608 return 0;
1609failed_clk_ptp:
1610 if (fep->clk_enet_out)
1611 clk_disable_unprepare(fep->clk_enet_out);
1612failed_clk_enet_out:
1613 clk_disable_unprepare(fep->clk_ipg);
1614failed_clk_ipg:
1615 clk_disable_unprepare(fep->clk_ahb);
1616
1617 return ret;
1618}
1619
1258static int fec_enet_mii_probe(struct net_device *ndev) 1620static int fec_enet_mii_probe(struct net_device *ndev)
1259{ 1621{
1260 struct fec_enet_private *fep = netdev_priv(ndev); 1622 struct fec_enet_private *fep = netdev_priv(ndev);
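
The new fec_enet_clk_enable() above uses the kernel's usual goto-unwind idiom: each failing clk_prepare_enable() jumps to a label that releases, in reverse order, only the clocks already enabled. A condensed standalone sketch of the idiom; the enable_*/disable_* helpers are stand-ins, not driver functions:

/* Sketch of the reverse-order unwind pattern used above; the three
 * "resources" stand in for clk_ahb, clk_ipg and the optional clocks. */
static int enable_a(void) { return 0; }
static int enable_b(void) { return 0; }
static int enable_c(void) { return -1; }        /* pretend this one fails */
static void disable_a(void) {}
static void disable_b(void) {}

static int bring_up(void)
{
        int ret;

        ret = enable_a();
        if (ret)
                return ret;
        ret = enable_b();
        if (ret)
                goto undo_a;
        ret = enable_c();
        if (ret)
                goto undo_b;
        return 0;

undo_b:
        disable_b();    /* release in reverse order of acquisition */
undo_a:
        disable_a();
        return ret;
}

int main(void)
{
        return bring_up() ? 1 : 0;      /* fails here because enable_c() fails */
}
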
@@ -1364,7 +1726,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1364 * Reference Manual has an error on this, and gets fixed on i.MX6Q 1726 * Reference Manual has an error on this, and gets fixed on i.MX6Q
1365 * document. 1727 * document.
1366 */ 1728 */
1367 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
1729 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
1368 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) 1730 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1369 fep->phy_speed--; 1731 fep->phy_speed--;
1370 fep->phy_speed <<= 1; 1732 fep->phy_speed <<= 1;
@@ -1773,6 +2135,11 @@ fec_enet_open(struct net_device *ndev)
1773 struct fec_enet_private *fep = netdev_priv(ndev); 2135 struct fec_enet_private *fep = netdev_priv(ndev);
1774 int ret; 2136 int ret;
1775 2137
2138 pinctrl_pm_select_default_state(&fep->pdev->dev);
2139 ret = fec_enet_clk_enable(ndev, true);
2140 if (ret)
2141 return ret;
2142
1776 /* I should reset the ring buffers here, but I don't yet know 2143 /* I should reset the ring buffers here, but I don't yet know
1777 * a simple way to do that. 2144 * a simple way to do that.
1778 */ 2145 */
@@ -1811,6 +2178,8 @@ fec_enet_close(struct net_device *ndev)
1811 phy_disconnect(fep->phy_dev); 2178 phy_disconnect(fep->phy_dev);
1812 } 2179 }
1813 2180
2181 fec_enet_clk_enable(ndev, false);
2182 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
1814 fec_enet_free_buffers(ndev); 2183 fec_enet_free_buffers(ndev);
1815 2184
1816 return 0; 2185 return 0;
@@ -1988,13 +2357,35 @@ static int fec_enet_init(struct net_device *ndev)
1988 const struct platform_device_id *id_entry = 2357 const struct platform_device_id *id_entry =
1989 platform_get_device_id(fep->pdev); 2358 platform_get_device_id(fep->pdev);
1990 struct bufdesc *cbd_base; 2359 struct bufdesc *cbd_base;
2360 int bd_size;
2361
2362 /* init the tx & rx ring size */
2363 fep->tx_ring_size = TX_RING_SIZE;
2364 fep->rx_ring_size = RX_RING_SIZE;
2365
2366 fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2367 fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
2368
2369 if (fep->bufdesc_ex)
2370 fep->bufdesc_size = sizeof(struct bufdesc_ex);
2371 else
2372 fep->bufdesc_size = sizeof(struct bufdesc);
2373 bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
2374 fep->bufdesc_size;
1991 2375
1992 /* Allocate memory for buffer descriptors. */ 2376 /* Allocate memory for buffer descriptors. */
1993 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
2377 cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
1994 GFP_KERNEL); 2378 GFP_KERNEL);
1995 if (!cbd_base) 2379 if (!cbd_base)
1996 return -ENOMEM; 2380 return -ENOMEM;
1997 2381
2382 fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
2383 &fep->tso_hdrs_dma, GFP_KERNEL);
2384 if (!fep->tso_hdrs) {
2385 dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
2386 return -ENOMEM;
2387 }
2388
1998 memset(cbd_base, 0, PAGE_SIZE); 2389 memset(cbd_base, 0, PAGE_SIZE);
1999 2390
2000 fep->netdev = ndev; 2391 fep->netdev = ndev;
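
For the tx_stop_threshold/tx_wake_threshold initialization above: assuming 4 KiB pages, so that MAX_SKB_FRAGS is 17 (a page-size-dependent assumption), the stop threshold works out to 100 * 2 + 17 = 217 free descriptors and the wake threshold to (512 - 217) / 2 = 147. The arithmetic, checked:

#include <stdio.h>

#define TX_RING_SIZE            512
#define FEC_MAX_TSO_SEGS        100
#define MAX_SKB_FRAGS           17      /* assumes 4 KiB pages */
#define FEC_MAX_SKB_DESCS       (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

int main(void)
{
        int stop = FEC_MAX_SKB_DESCS;                   /* 217 */
        int wake = (TX_RING_SIZE - stop) / 2;           /* 147 */

        printf("stop threshold %d, wake threshold %d\n", stop, wake);
        return 0;
}
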
@@ -2004,10 +2395,6 @@ static int fec_enet_init(struct net_device *ndev)
2004 /* make sure MAC we just acquired is programmed into the hw */ 2395 /* make sure MAC we just acquired is programmed into the hw */
2005 fec_set_mac_address(ndev, NULL); 2396 fec_set_mac_address(ndev, NULL);
2006 2397
2007 /* init the tx & rx ring size */
2008 fep->tx_ring_size = TX_RING_SIZE;
2009 fep->rx_ring_size = RX_RING_SIZE;
2010
2011 /* Set receive and transmit descriptor base. */ 2398 /* Set receive and transmit descriptor base. */
2012 fep->rx_bd_base = cbd_base; 2399 fep->rx_bd_base = cbd_base;
2013 if (fep->bufdesc_ex) 2400 if (fep->bufdesc_ex)
@@ -2024,21 +2411,21 @@ static int fec_enet_init(struct net_device *ndev)
2024 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 2411 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
2025 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 2412 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
2026 2413
2027 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
2414 if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
2028 /* enable hw VLAN support */ 2415 /* enable hw VLAN support */
2029 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 2416 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2030 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
2031 }
2032 2417
2033 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { 2418 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
2419 ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
2420
2034 /* enable hw accelerator */ 2421 /* enable hw accelerator */
2035 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2422 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2036 | NETIF_F_RXCSUM);
2423 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
2037 ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2038 | NETIF_F_RXCSUM);
2039 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 2424 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2040 } 2425 }
2041 2426
2427 ndev->hw_features = ndev->features;
2428
2042 fec_restart(ndev, 0); 2429 fec_restart(ndev, 0);
2043 2430
2044 return 0; 2431 return 0;
@@ -2114,6 +2501,9 @@ fec_probe(struct platform_device *pdev)
2114 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 2501 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
2115#endif 2502#endif
2116 2503
2504 /* Select default pin state */
2505 pinctrl_pm_select_default_state(&pdev->dev);
2506
2117 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2507 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2118 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 2508 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2119 if (IS_ERR(fep->hwp)) { 2509 if (IS_ERR(fep->hwp)) {
@@ -2164,26 +2554,10 @@ fec_probe(struct platform_device *pdev)
2164 fep->bufdesc_ex = 0; 2554 fep->bufdesc_ex = 0;
2165 } 2555 }
2166 2556
2167 ret = clk_prepare_enable(fep->clk_ahb);
2557 ret = fec_enet_clk_enable(ndev, true);
2168 if (ret) 2558 if (ret)
2169 goto failed_clk; 2559 goto failed_clk;
2170 2560
2171 ret = clk_prepare_enable(fep->clk_ipg);
2172 if (ret)
2173 goto failed_clk_ipg;
2174
2175 if (fep->clk_enet_out) {
2176 ret = clk_prepare_enable(fep->clk_enet_out);
2177 if (ret)
2178 goto failed_clk_enet_out;
2179 }
2180
2181 if (fep->clk_ptp) {
2182 ret = clk_prepare_enable(fep->clk_ptp);
2183 if (ret)
2184 goto failed_clk_ptp;
2185 }
2186
2187 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 2561 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2188 if (!IS_ERR(fep->reg_phy)) { 2562 if (!IS_ERR(fep->reg_phy)) {
2189 ret = regulator_enable(fep->reg_phy); 2563 ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2599,8 @@ fec_probe(struct platform_device *pdev)
2225 2599
2226 /* Carrier starts down, phylib will bring it up */ 2600 /* Carrier starts down, phylib will bring it up */
2227 netif_carrier_off(ndev); 2601 netif_carrier_off(ndev);
2602 fec_enet_clk_enable(ndev, false);
2603 pinctrl_pm_select_sleep_state(&pdev->dev);
2228 2604
2229 ret = register_netdev(ndev); 2605 ret = register_netdev(ndev);
2230 if (ret) 2606 if (ret)
@@ -2244,15 +2620,7 @@ failed_init:
2244 if (fep->reg_phy) 2620 if (fep->reg_phy)
2245 regulator_disable(fep->reg_phy); 2621 regulator_disable(fep->reg_phy);
2246failed_regulator: 2622failed_regulator:
2247 if (fep->clk_ptp)
2623 fec_enet_clk_enable(ndev, false);
2248 clk_disable_unprepare(fep->clk_ptp);
2249failed_clk_ptp:
2250 if (fep->clk_enet_out)
2251 clk_disable_unprepare(fep->clk_enet_out);
2252failed_clk_enet_out:
2253 clk_disable_unprepare(fep->clk_ipg);
2254failed_clk_ipg:
2255 clk_disable_unprepare(fep->clk_ahb);
2256failed_clk: 2624failed_clk:
2257failed_ioremap: 2625failed_ioremap:
2258 free_netdev(ndev); 2626 free_netdev(ndev);
@@ -2272,14 +2640,9 @@ fec_drv_remove(struct platform_device *pdev)
2272 del_timer_sync(&fep->time_keep); 2640 del_timer_sync(&fep->time_keep);
2273 if (fep->reg_phy) 2641 if (fep->reg_phy)
2274 regulator_disable(fep->reg_phy); 2642 regulator_disable(fep->reg_phy);
2275 if (fep->clk_ptp)
2276 clk_disable_unprepare(fep->clk_ptp);
2277 if (fep->ptp_clock) 2643 if (fep->ptp_clock)
2278 ptp_clock_unregister(fep->ptp_clock); 2644 ptp_clock_unregister(fep->ptp_clock);
2279 if (fep->clk_enet_out)
2645 fec_enet_clk_enable(ndev, false);
2280 clk_disable_unprepare(fep->clk_enet_out);
2281 clk_disable_unprepare(fep->clk_ipg);
2282 clk_disable_unprepare(fep->clk_ahb);
2283 free_netdev(ndev); 2646 free_netdev(ndev);
2284 2647
2285 return 0; 2648 return 0;
@@ -2296,12 +2659,8 @@ fec_suspend(struct device *dev)
2296 fec_stop(ndev); 2659 fec_stop(ndev);
2297 netif_device_detach(ndev); 2660 netif_device_detach(ndev);
2298 } 2661 }
2299 if (fep->clk_ptp)
2300 clk_disable_unprepare(fep->clk_ptp);
2301 if (fep->clk_enet_out)
2302 clk_disable_unprepare(fep->clk_enet_out);
2303 clk_disable_unprepare(fep->clk_ipg);
2304 clk_disable_unprepare(fep->clk_ahb);
2662 fec_enet_clk_enable(ndev, false);
2663 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2305 2664
2306 if (fep->reg_phy) 2665 if (fep->reg_phy)
2307 regulator_disable(fep->reg_phy); 2666 regulator_disable(fep->reg_phy);
@@ -2322,25 +2681,10 @@ fec_resume(struct device *dev)
2322 return ret; 2681 return ret;
2323 } 2682 }
2324 2683
2325 ret = clk_prepare_enable(fep->clk_ahb);
2684 pinctrl_pm_select_default_state(&fep->pdev->dev);
2685 ret = fec_enet_clk_enable(ndev, true);
2326 if (ret) 2686 if (ret)
2327 goto failed_clk_ahb;
2687 goto failed_clk;
2328
2329 ret = clk_prepare_enable(fep->clk_ipg);
2330 if (ret)
2331 goto failed_clk_ipg;
2332
2333 if (fep->clk_enet_out) {
2334 ret = clk_prepare_enable(fep->clk_enet_out);
2335 if (ret)
2336 goto failed_clk_enet_out;
2337 }
2338
2339 if (fep->clk_ptp) {
2340 ret = clk_prepare_enable(fep->clk_ptp);
2341 if (ret)
2342 goto failed_clk_ptp;
2343 }
2344 2688
2345 if (netif_running(ndev)) { 2689 if (netif_running(ndev)) {
2346 fec_restart(ndev, fep->full_duplex); 2690 fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2693,7 @@ fec_resume(struct device *dev)
2349 2693
2350 return 0; 2694 return 0;
2351 2695
2352failed_clk_ptp:
2696failed_clk:
2353 if (fep->clk_enet_out)
2354 clk_disable_unprepare(fep->clk_enet_out);
2355failed_clk_enet_out:
2356 clk_disable_unprepare(fep->clk_ipg);
2357failed_clk_ipg:
2358 clk_disable_unprepare(fep->clk_ahb);
2359failed_clk_ahb:
2360 if (fep->reg_phy) 2697 if (fep->reg_phy)
2361 regulator_disable(fep->reg_phy); 2698 regulator_disable(fep->reg_phy);
2362 return ret; 2699 return ret;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc80db41d6b3..cfaf17b70f3f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -792,10 +792,6 @@ static int fs_init_phy(struct net_device *dev)
792 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, 792 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
793 iface); 793 iface);
794 if (!phydev) { 794 if (!phydev) {
795 phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
796 iface);
797 }
798 if (!phydev) {
799 dev_err(&dev->dev, "Could not attach to PHY\n"); 795 dev_err(&dev->dev, "Could not attach to PHY\n");
800 return -ENODEV; 796 return -ENODEV;
801 } 797 }
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
1029 fpi->use_napi = 1; 1025 fpi->use_napi = 1;
1030 fpi->napi_weight = 17; 1026 fpi->napi_weight = 17;
1031 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 1027 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
1032 if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
1033 NULL)))
1034 goto out_free_fpi;
1028 if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
1029 err = of_phy_register_fixed_link(ofdev->dev.of_node);
1030 if (err)
1031 goto out_free_fpi;
1032
1033 /* In the case of a fixed PHY, the DT node associated
1034 * to the PHY is the Ethernet MAC DT node.
1035 */
1036 fpi->phy_node = ofdev->dev.of_node;
1037 }
1035 1038
1036 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { 1039 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
1037 phy_connection_type = of_get_property(ofdev->dev.of_node, 1040 phy_connection_type = of_get_property(ofdev->dev.of_node,
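
The same conversion appears in fs_enet above and in gianfar and ucc_geth below: when a MAC node has no phy-handle but carries a fixed-link description, of_phy_register_fixed_link() is called and the MAC's own DT node then stands in as the PHY node. A compilable sketch of that decision flow; the dt_node type and the parse_phy_handle()/is_fixed_link()/register_fixed_link() helpers are mocked stand-ins for the of_* calls:

#include <stdbool.h>
#include <stddef.h>

/* Mocked device-tree helpers so the pattern compiles outside the kernel. */
struct dt_node { bool fixed_link; };
static struct dt_node *parse_phy_handle(struct dt_node *np) { (void)np; return NULL; }
static bool is_fixed_link(const struct dt_node *np) { return np->fixed_link; }
static int register_fixed_link(struct dt_node *np) { (void)np; return 0; }

/* No phy-handle + fixed-link property: register the fixed PHY and let
 * the MAC's own node double as the PHY node. */
static struct dt_node *resolve_phy_node(struct dt_node *np, int *err)
{
        struct dt_node *phy = parse_phy_handle(np);

        if (!phy && is_fixed_link(np)) {
                *err = register_fixed_link(np);
                if (*err)
                        return NULL;
                phy = np;
        }
        return phy;
}

int main(void)
{
        struct dt_node mac = { .fixed_link = true };
        int err = 0;

        return resolve_phy_node(&mac, &err) == &mac && !err ? 0 : 1;
}
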
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ee6ddbd4f252..a6cf40e62f3a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -889,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
889 889
890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
891 891
892 /* In the case of a fixed PHY, the DT node associated
893 * to the PHY is the Ethernet MAC DT node.
894 */
895 if (of_phy_is_fixed_link(np)) {
896 err = of_phy_register_fixed_link(np);
897 if (err)
898 goto err_grp_init;
899
900 priv->phy_node = np;
901 }
902
892 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 903 /* Find the TBI PHY. If it's not there, we don't support SGMII */
893 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 904 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
894 905
@@ -1231,7 +1242,7 @@ static void gfar_hw_init(struct gfar_private *priv)
1231 gfar_write_isrg(priv); 1242 gfar_write_isrg(priv);
1232} 1243}
1233 1244
1234static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
1245static void gfar_init_addr_hash_table(struct gfar_private *priv)
1235{ 1246{
1236 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1247 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1237 1248
@@ -1373,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
1373 1384
1374 gfar_hw_init(priv); 1385 gfar_hw_init(priv);
1375 1386
1387 /* Carrier starts down, phylib will bring it up */
1388 netif_carrier_off(dev);
1389
1376 err = register_netdev(dev); 1390 err = register_netdev(dev);
1377 1391
1378 if (err) { 1392 if (err) {
@@ -1380,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
1380 goto register_fail; 1394 goto register_fail;
1381 } 1395 }
1382 1396
1383 /* Carrier starts down, phylib will bring it up */
1384 netif_carrier_off(dev);
1385
1386 device_init_wakeup(&dev->dev, 1397 device_init_wakeup(&dev->dev,
1387 priv->device_flags & 1398 priv->device_flags &
1388 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1399 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1660,9 +1671,6 @@ static int init_phy(struct net_device *dev)
1660 1671
1661 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1672 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1662 interface); 1673 interface);
1663 if (!priv->phydev)
1664 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1665 interface);
1666 if (!priv->phydev) { 1674 if (!priv->phydev) {
1667 dev_err(&dev->dev, "could not attach to PHY\n"); 1675 dev_err(&dev->dev, "could not attach to PHY\n");
1668 return -ENODEV; 1676 return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c8299c31b21f..fab39e295441 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
1728 1728
1729 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, 1729 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
1730 priv->phy_interface); 1730 priv->phy_interface);
1731 if (!phydev)
1732 phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1733 priv->phy_interface);
1734 if (!phydev) { 1731 if (!phydev) {
1735 dev_err(&dev->dev, "Could not attach to PHY\n"); 1732 dev_err(&dev->dev, "Could not attach to PHY\n");
1736 return -ENODEV; 1733 return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3790 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3787 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3791 3788
3792 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); 3789 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
3790 if (!ug_info->phy_node) {
3791 /* In the case of a fixed PHY, the DT node associated
3792 * to the PHY is the Ethernet MAC DT node.
3793 */
3794 if (of_phy_is_fixed_link(np)) {
3795 err = of_phy_register_fixed_link(np);
3796 if (err)
3797 return err;
3798 }
3799 ug_info->phy_node = np;
3800 }
3793 3801
3794 /* Find the TBI PHY node. If it's not there, we don't support SGMII */ 3802 /* Find the TBI PHY node. If it's not there, we don't support SGMII */
3795 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 3803 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 413329eff2ff..cc83350d56ba 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
417 417
418void uec_set_ethtool_ops(struct net_device *netdev) 418void uec_set_ethtool_ops(struct net_device *netdev)
419{ 419{
420 SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
420 netdev->ethtool_ops = &uec_ethtool_ops;
421} 421}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d449fcb90199..0c9d55c862ae 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -162,7 +162,9 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
162 162
163 /* Return all Fs if nothing was there */ 163 /* Return all Fs if nothing was there */
164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) { 164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
165 dev_err(&bus->dev, "MDIO read error\n");
165 dev_err(&bus->dev,
166 "Error while reading PHY%d reg at %d.%d\n",
167 phy_id, dev_addr, regnum);
166 return 0xffff; 168 return 0xffff;
167 } 169 }
168 170
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 7becab1aa3e4..cfe7a7431730 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
256 dev->netdev_ops = &fjn_netdev_ops; 256 dev->netdev_ops = &fjn_netdev_ops;
257 dev->watchdog_timeo = TX_TIMEOUT; 257 dev->watchdog_timeo = TX_TIMEOUT;
258 258
259 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
259 dev->ethtool_ops = &netdev_ethtool_ops;
260 260
261 return fmvj18x_config(link); 261 return fmvj18x_config(link);
262} /* fmvj18x_attach */ 262} /* fmvj18x_attach */
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 000000000000..e9421731b05e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,27 @@
1#
2# HISILICON device configuration
3#
4
5config NET_VENDOR_HISILICON
6 bool "Hisilicon devices"
7 default y
8 depends on ARM
9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly affect the
15 kernel: saying N will just cause the configurator to skip all
16 the questions about Hisilicon devices. If you say Y, you will be asked
17 for your specific card in the following questions.
18
19if NET_VENDOR_HISILICON
20
21config HIX5HD2_GMAC
22 tristate "Hisilicon HIX5HD2 Family Network Device Support"
23 select PHYLIB
24 help
25 This selects the hix5hd2 mac family network device.
26
27endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 000000000000..9175e84622d4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the HISILICON network device drivers.
3#
4
5obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 000000000000..0ffdcd381fdd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1066 @@
1/* Copyright (c) 2014 Linaro Ltd.
2 * Copyright (c) 2014 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/module.h>
11#include <linux/interrupt.h>
12#include <linux/etherdevice.h>
13#include <linux/platform_device.h>
14#include <linux/of_net.h>
15#include <linux/of_mdio.h>
16#include <linux/clk.h>
17#include <linux/circ_buf.h>
18
#define STATION_ADDR_LOW		0x0000
#define STATION_ADDR_HIGH		0x0004
#define MAC_DUPLEX_HALF_CTRL		0x0008
#define MAX_FRM_SIZE			0x003c
#define PORT_MODE			0x0040
#define PORT_EN				0x0044
#define BITS_TX_EN			BIT(2)
#define BITS_RX_EN			BIT(1)
#define REC_FILT_CONTROL		0x0064
#define BIT_CRC_ERR_PASS		BIT(5)
#define BIT_PAUSE_FRM_PASS		BIT(4)
#define BIT_VLAN_DROP_EN		BIT(3)
#define BIT_BC_DROP_EN			BIT(2)
#define BIT_MC_MATCH_EN			BIT(1)
#define BIT_UC_MATCH_EN			BIT(0)
#define PORT_MC_ADDR_LOW		0x0068
#define PORT_MC_ADDR_HIGH		0x006C
#define CF_CRC_STRIP			0x01b0
#define MODE_CHANGE_EN			0x01b4
#define BIT_MODE_CHANGE_EN		BIT(0)
#define COL_SLOT_TIME			0x01c0
#define RECV_CONTROL			0x01e0
#define BIT_STRIP_PAD_EN		BIT(3)
#define BIT_RUNT_PKT_EN			BIT(4)
#define CONTROL_WORD			0x0214
#define MDIO_SINGLE_CMD			0x03c0
#define MDIO_SINGLE_DATA		0x03c4
#define MDIO_CTRL			0x03cc
#define MDIO_RDATA_STATUS		0x03d0

#define MDIO_START			BIT(20)
#define MDIO_R_VALID			BIT(0)
#define MDIO_READ			(BIT(17) | MDIO_START)
#define MDIO_WRITE			(BIT(16) | MDIO_START)

#define RX_FQ_START_ADDR		0x0500
#define RX_FQ_DEPTH			0x0504
#define RX_FQ_WR_ADDR			0x0508
#define RX_FQ_RD_ADDR			0x050c
#define RX_FQ_VLDDESC_CNT		0x0510
#define RX_FQ_ALEMPTY_TH		0x0514
#define RX_FQ_REG_EN			0x0518
#define BITS_RX_FQ_START_ADDR_EN	BIT(2)
#define BITS_RX_FQ_DEPTH_EN		BIT(1)
#define BITS_RX_FQ_RD_ADDR_EN		BIT(0)
#define RX_FQ_ALFULL_TH			0x051c
#define RX_BQ_START_ADDR		0x0520
#define RX_BQ_DEPTH			0x0524
#define RX_BQ_WR_ADDR			0x0528
#define RX_BQ_RD_ADDR			0x052c
#define RX_BQ_FREE_DESC_CNT		0x0530
#define RX_BQ_ALEMPTY_TH		0x0534
#define RX_BQ_REG_EN			0x0538
#define BITS_RX_BQ_START_ADDR_EN	BIT(2)
#define BITS_RX_BQ_DEPTH_EN		BIT(1)
#define BITS_RX_BQ_WR_ADDR_EN		BIT(0)
#define RX_BQ_ALFULL_TH			0x053c
#define TX_BQ_START_ADDR		0x0580
#define TX_BQ_DEPTH			0x0584
#define TX_BQ_WR_ADDR			0x0588
#define TX_BQ_RD_ADDR			0x058c
#define TX_BQ_VLDDESC_CNT		0x0590
#define TX_BQ_ALEMPTY_TH		0x0594
#define TX_BQ_REG_EN			0x0598
#define BITS_TX_BQ_START_ADDR_EN	BIT(2)
#define BITS_TX_BQ_DEPTH_EN		BIT(1)
#define BITS_TX_BQ_RD_ADDR_EN		BIT(0)
#define TX_BQ_ALFULL_TH			0x059c
#define TX_RQ_START_ADDR		0x05a0
#define TX_RQ_DEPTH			0x05a4
#define TX_RQ_WR_ADDR			0x05a8
#define TX_RQ_RD_ADDR			0x05ac
#define TX_RQ_FREE_DESC_CNT		0x05b0
#define TX_RQ_ALEMPTY_TH		0x05b4
#define TX_RQ_REG_EN			0x05b8
#define BITS_TX_RQ_START_ADDR_EN	BIT(2)
#define BITS_TX_RQ_DEPTH_EN		BIT(1)
#define BITS_TX_RQ_WR_ADDR_EN		BIT(0)
#define TX_RQ_ALFULL_TH			0x05bc
#define RAW_PMU_INT			0x05c0
#define ENA_PMU_INT			0x05c4
#define STATUS_PMU_INT			0x05c8
#define MAC_FIFO_ERR_IN			BIT(30)
#define TX_RQ_IN_TIMEOUT_INT		BIT(29)
#define RX_BQ_IN_TIMEOUT_INT		BIT(28)
#define TXOUTCFF_FULL_INT		BIT(27)
#define TXOUTCFF_EMPTY_INT		BIT(26)
#define TXCFF_FULL_INT			BIT(25)
#define TXCFF_EMPTY_INT			BIT(24)
#define RXOUTCFF_FULL_INT		BIT(23)
#define RXOUTCFF_EMPTY_INT		BIT(22)
#define RXCFF_FULL_INT			BIT(21)
#define RXCFF_EMPTY_INT			BIT(20)
#define TX_RQ_IN_INT			BIT(19)
#define TX_BQ_OUT_INT			BIT(18)
#define RX_BQ_IN_INT			BIT(17)
#define RX_FQ_OUT_INT			BIT(16)
#define TX_RQ_EMPTY_INT			BIT(15)
#define TX_RQ_FULL_INT			BIT(14)
#define TX_RQ_ALEMPTY_INT		BIT(13)
#define TX_RQ_ALFULL_INT		BIT(12)
#define TX_BQ_EMPTY_INT			BIT(11)
#define TX_BQ_FULL_INT			BIT(10)
#define TX_BQ_ALEMPTY_INT		BIT(9)
#define TX_BQ_ALFULL_INT		BIT(8)
#define RX_BQ_EMPTY_INT			BIT(7)
#define RX_BQ_FULL_INT			BIT(6)
#define RX_BQ_ALEMPTY_INT		BIT(5)
#define RX_BQ_ALFULL_INT		BIT(4)
#define RX_FQ_EMPTY_INT			BIT(3)
#define RX_FQ_FULL_INT			BIT(2)
#define RX_FQ_ALEMPTY_INT		BIT(1)
#define RX_FQ_ALFULL_INT		BIT(0)

#define DEF_INT_MASK	(RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
			 TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)

#define DESC_WR_RD_ENA			0x05cc
#define IN_QUEUE_TH			0x05d8
#define OUT_QUEUE_TH			0x05dc
#define QUEUE_TX_BQ_SHIFT		16
#define RX_BQ_IN_TIMEOUT_TH		0x05e0
#define TX_RQ_IN_TIMEOUT_TH		0x05e4
#define STOP_CMD			0x05e8
#define BITS_TX_STOP			BIT(1)
#define BITS_RX_STOP			BIT(0)
#define FLUSH_CMD			0x05ec
#define BITS_TX_FLUSH_CMD		BIT(5)
#define BITS_RX_FLUSH_CMD		BIT(4)
#define BITS_TX_FLUSH_FLAG_DOWN		BIT(3)
#define BITS_TX_FLUSH_FLAG_UP		BIT(2)
#define BITS_RX_FLUSH_FLAG_DOWN		BIT(1)
#define BITS_RX_FLUSH_FLAG_UP		BIT(0)
#define RX_CFF_NUM_REG			0x05f0
#define PMU_FSM_REG			0x05f8
#define RX_FIFO_PKT_IN_NUM		0x05fc
#define RX_FIFO_PKT_OUT_NUM		0x0600

#define RGMII_SPEED_1000		0x2c
#define RGMII_SPEED_100			0x2f
#define RGMII_SPEED_10			0x2d
#define MII_SPEED_100			0x0f
#define MII_SPEED_10			0x0d
#define GMAC_SPEED_1000			0x05
#define GMAC_SPEED_100			0x01
#define GMAC_SPEED_10			0x00
#define GMAC_FULL_DUPLEX		BIT(4)

#define RX_BQ_INT_THRESHOLD		0x01
#define TX_RQ_INT_THRESHOLD		0x01
#define RX_BQ_IN_TIMEOUT		0x10000
#define TX_RQ_IN_TIMEOUT		0x50000

#define MAC_MAX_FRAME_SIZE		1600
#define DESC_SIZE			32
#define RX_DESC_NUM			1024
#define TX_DESC_NUM			1024

#define DESC_VLD_FREE			0
#define DESC_VLD_BUSY			0x80000000
#define DESC_FL_MID			0
#define DESC_FL_LAST			0x20000000
#define DESC_FL_FIRST			0x40000000
#define DESC_FL_FULL			0x60000000
#define DESC_DATA_LEN_OFF		16
#define DESC_BUFF_LEN_OFF		0
#define DESC_DATA_MASK			0x7ff

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s)		(((n) + 1) & ((s) - 1))
#define dma_cnt(n)			((n) >> 5)
#define dma_byte(n)			((n) << 5)

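These three helpers encode the ring geometry: each descriptor is 32 bytes (DESC_SIZE), so dma_cnt() turns the byte offsets held in the hardware pointer registers into descriptor indices, dma_byte() converts back, and dma_ring_incr() advances an index modulo the power-of-two ring size. A sketch of the arithmetic (illustrative helper, not part of the driver):

/* Illustrative only: hardware pointer registers hold byte offsets; the
 * driver works in descriptor indices and converts at the boundaries.
 */
static inline u32 hix5hd2_example_advance(u32 byte_off)
{
	u32 idx = dma_cnt(byte_off);		/* byte offset -> index */

	idx = dma_ring_incr(idx, RX_DESC_NUM);	/* next slot, wraps at 1024 */
	return dma_byte(idx);			/* index -> byte offset */
}
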
struct hix5hd2_desc {
	__le32 buff_addr;
	__le32 cmd;
} __aligned(32);

struct hix5hd2_desc_sw {
	struct hix5hd2_desc *desc;
	dma_addr_t	phys_addr;
	unsigned int	count;
	unsigned int	size;
};

/* Four hardware descriptor queues, as used below: the RX free queue (rx_fq)
 * supplies empty buffers to the MAC, the RX backlog queue (rx_bq) returns
 * received frames, the TX backlog queue (tx_bq) submits frames for
 * transmission, and the TX reclaim queue (tx_rq) returns completed TX
 * descriptors.
 */
#define QUEUE_NUMS	4
struct hix5hd2_priv {
	struct hix5hd2_desc_sw pool[QUEUE_NUMS];
#define rx_fq		pool[0]
#define rx_bq		pool[1]
#define tx_bq		pool[2]
#define tx_rq		pool[3]

	void __iomem *base;
	void __iomem *ctrl_base;

	struct sk_buff *tx_skb[TX_DESC_NUM];
	struct sk_buff *rx_skb[RX_DESC_NUM];

	struct device *dev;
	struct net_device *netdev;

	struct phy_device *phy;
	struct device_node *phy_node;
	phy_interface_t	phy_mode;

	unsigned int speed;
	unsigned int duplex;

	struct clk *clk;
	struct mii_bus *bus;
	struct napi_struct napi;
	struct work_struct tx_timeout_task;
};

static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		if (speed == SPEED_1000)
			val = RGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = RGMII_SPEED_100;
		else
			val = RGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(dev, "unsupported phy mode, falling back to MII/10M\n");
		val = MII_SPEED_10;
		break;
	}

	if (duplex)
		val |= GMAC_FULL_DUPLEX;
	writel_relaxed(val, priv->ctrl_base);

	writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
	if (speed == SPEED_1000)
		val = GMAC_SPEED_1000;
	else if (speed == SPEED_100)
		val = GMAC_SPEED_100;
	else
		val = GMAC_SPEED_10;
	writel_relaxed(val, priv->base + PORT_MODE);
	writel_relaxed(0, priv->base + MODE_CHANGE_EN);
	writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
}

static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
{
	writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
	writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
	writel_relaxed(0, priv->base + RX_FQ_REG_EN);

	writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
	writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
	writel_relaxed(0, priv->base + RX_BQ_REG_EN);

	writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
	writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
	writel_relaxed(0, priv->base + TX_BQ_REG_EN);

	writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
	writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
	writel_relaxed(0, priv->base + TX_RQ_REG_EN);
}

static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
	writel_relaxed(0, priv->base + RX_FQ_REG_EN);
}

static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
	writel_relaxed(0, priv->base + RX_BQ_REG_EN);
}

static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
	writel_relaxed(0, priv->base + TX_BQ_REG_EN);
}

static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
	writel_relaxed(0, priv->base + TX_RQ_REG_EN);
}

static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
{
	hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
	hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
	hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
	hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
}

static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
{
	u32 val;

	/* disable and clear all interrupts */
	writel_relaxed(0, priv->base + ENA_PMU_INT);
	writel_relaxed(~0, priv->base + RAW_PMU_INT);

	writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
	writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
	writel_relaxed(0, priv->base + COL_SLOT_TIME);

	val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
	writel_relaxed(val, priv->base + IN_QUEUE_TH);

	writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
	writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);

	hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
	hix5hd2_set_desc_addr(priv);
}

static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
{
	writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
}

static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
{
	writel_relaxed(0, priv->base + ENA_PMU_INT);
}

static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
{
	writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
	writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
}

static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
{
	writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
	writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
}

static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;
	u32 val;

	val = mac[1] | (mac[0] << 8);
	writel_relaxed(val, priv->base + STATION_ADDR_HIGH);

	val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
	writel_relaxed(val, priv->base + STATION_ADDR_LOW);
}

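The station address is split across two registers: the first two octets of the MAC land in STATION_ADDR_HIGH, the remaining four in STATION_ADDR_LOW, most-significant octet in the highest bits. The inverse unpacking, for illustration only (hypothetical helper, not in the driver):

/* Illustrative only: recover the MAC address from the station-address
 * registers, inverting hix5hd2_hw_set_mac_addr() above.
 */
static void hix5hd2_example_get_mac_addr(struct hix5hd2_priv *priv, u8 *mac)
{
	u32 hi = readl_relaxed(priv->base + STATION_ADDR_HIGH);
	u32 lo = readl_relaxed(priv->base + STATION_ADDR_LOW);

	mac[0] = hi >> 8;
	mac[1] = hi;
	mac[2] = lo >> 24;
	mac[3] = lo >> 16;
	mac[4] = lo >> 8;
	mac[5] = lo;
}
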
static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (!ret)
		hix5hd2_hw_set_mac_addr(dev);

	return ret;
}

static void hix5hd2_adjust_link(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hix5hd2_config_port(dev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
{
	struct hix5hd2_desc *desc;
	struct sk_buff *skb;
	u32 start, end, num, pos, i;
	u32 len = MAC_MAX_FRAME_SIZE;
	dma_addr_t addr;

	/* software write pointer */
	start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
	/* logic read pointer */
	end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
	num = CIRC_SPACE(start, end, RX_DESC_NUM);

	for (i = 0, pos = start; i < num; i++) {
		if (priv->rx_skb[pos])
			break;

		skb = netdev_alloc_skb_ip_align(priv->netdev, len);
		if (unlikely(skb == NULL))
			break;

		addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, addr)) {
			dev_kfree_skb_any(skb);
			break;
		}

		desc = priv->rx_fq.desc + pos;
		desc->buff_addr = cpu_to_le32(addr);
		priv->rx_skb[pos] = skb;
		desc->cmd = cpu_to_le32(DESC_VLD_FREE |
					(len - 1) << DESC_BUFF_LEN_OFF);
		pos = dma_ring_incr(pos, RX_DESC_NUM);
	}

	/* ensure desc updated */
	wmb();

	if (pos != start)
		writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
}

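hix5hd2_rx_refill() is the producer side of the RX free queue: CIRC_SPACE() (from linux/circ_buf.h, already included above) bounds how many slots can be filled given the hardware's read pointer, and the new write pointer is published only after the wmb(). The producer idiom in isolation (sketch, hypothetical helper):

/* Sketch of the producer idiom used above: 'head' is ours to advance,
 * 'tail' belongs to the consumer (here, the hardware read pointer).
 */
static void example_produce(u32 *head, u32 tail)
{
	u32 space = CIRC_SPACE(*head, tail, RX_DESC_NUM);

	while (space--) {
		/* ... fill the descriptor at *head ... */
		*head = dma_ring_incr(*head, RX_DESC_NUM);
	}
	wmb();	/* descriptor contents visible before the pointer update */
	/* ... then write *head back to the hardware register ... */
}
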
static int hix5hd2_rx(struct net_device *dev, int limit)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct hix5hd2_desc *desc;
	dma_addr_t addr;
	u32 start, end, num, pos, i, len;

	/* software read pointer */
	start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
	/* logic write pointer */
	end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
	num = CIRC_CNT(end, start, RX_DESC_NUM);
	if (num > limit)
		num = limit;

	/* ensure get updated desc */
	rmb();
	for (i = 0, pos = start; i < num; i++) {
		skb = priv->rx_skb[pos];
		if (unlikely(!skb)) {
			netdev_err(dev, "inconsistent rx_skb\n");
			break;
		}
		priv->rx_skb[pos] = NULL;

		desc = priv->rx_bq.desc + pos;
		len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
		      DESC_DATA_MASK;
		addr = le32_to_cpu(desc->buff_addr);
		dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, len);
		if (skb->len > MAC_MAX_FRAME_SIZE) {
			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb->protocol = eth_type_trans(skb, dev);
		/* update stats before the skb is handed to GRO; it must not
		 * be touched afterwards
		 */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		dev->last_rx = jiffies;
		napi_gro_receive(&priv->napi, skb);
next:
		pos = dma_ring_incr(pos, RX_DESC_NUM);
	}

	if (pos != start)
		writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);

	hix5hd2_rx_refill(priv);

	return num;
}

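Note the barrier pairing across the two RX functions: the wmb() in hix5hd2_rx_refill() orders descriptor writes before the write-pointer update, and the rmb() here orders the write-pointer read before the descriptor reads. Schematically:

/* Barrier pairing used by the ring code above (descriptive sketch):
 *
 *   producer (refill/xmit)            consumer (rx/reclaim)
 *   ----------------------            ---------------------
 *   write descriptor fields           read pointer register
 *   wmb();                            rmb();
 *   write pointer register            read descriptor fields
 */
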
static void hix5hd2_xmit_reclaim(struct net_device *dev)
{
	struct sk_buff *skb;
	struct hix5hd2_desc *desc;
	struct hix5hd2_priv *priv = netdev_priv(dev);
	unsigned int bytes_compl = 0, pkts_compl = 0;
	u32 start, end, num, pos, i;
	dma_addr_t addr;

	netif_tx_lock(dev);

	/* software read */
	start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
	/* logic write */
	end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
	num = CIRC_CNT(end, start, TX_DESC_NUM);

	for (i = 0, pos = start; i < num; i++) {
		skb = priv->tx_skb[pos];
		if (unlikely(!skb)) {
			netdev_err(dev, "inconsistent tx_skb\n");
			break;
		}

		pkts_compl++;
		bytes_compl += skb->len;
		desc = priv->tx_rq.desc + pos;
		addr = le32_to_cpu(desc->buff_addr);
		dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
		priv->tx_skb[pos] = NULL;
		dev_consume_skb_any(skb);
		pos = dma_ring_incr(pos, TX_DESC_NUM);
	}

	if (pos != start)
		writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);

	netif_tx_unlock(dev);

	if (pkts_compl || bytes_compl)
		netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
		netif_wake_queue(priv->netdev);
}

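This completion path closes the byte-queue-limits (BQL) accounting opened by netdev_sent_queue() in hix5hd2_net_xmit(); netdev_reset_queue() in ndo_open keeps the counters consistent across restarts. The pairing stripped to its skeleton (sketch, with hypothetical helpers):

/* Sketch only: every byte counted by netdev_sent_queue() on the xmit
 * path must later be reported back via netdev_completed_queue().
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... post the skb to the hardware ring ... */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev,
				unsigned int pkts, unsigned int bytes)
{
	/* ... unmap and free the completed skbs ... */
	netdev_completed_queue(dev, pkts, bytes);
}
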
static int hix5hd2_poll(struct napi_struct *napi, int budget)
{
	struct hix5hd2_priv *priv = container_of(napi,
				struct hix5hd2_priv, napi);
	struct net_device *dev = priv->netdev;
	int work_done = 0, task = budget;
	int ints, num;

	do {
		hix5hd2_xmit_reclaim(dev);
		num = hix5hd2_rx(dev, task);
		work_done += num;
		task -= num;
		if ((work_done >= budget) || (num == 0))
			break;

		ints = readl_relaxed(priv->base + RAW_PMU_INT);
		writel_relaxed(ints, priv->base + RAW_PMU_INT);
	} while (ints & DEF_INT_MASK);

	if (work_done < budget) {
		napi_complete(napi);
		hix5hd2_irq_enable(priv);
	}

	return work_done;
}

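hix5hd2_poll() follows the standard NAPI contract: the hard interrupt handler masks the device interrupt and schedules the poller; the poller processes at most budget packets and re-enables the interrupt only after napi_complete(), when the budget was not exhausted. The canonical skeleton, for reference (sketch, not driver code):

/* Sketch of the NAPI contract implemented above. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process at most 'budget' packets, counting into work_done ... */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable the device interrupt here */
	}
	return work_done;
}
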
static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct hix5hd2_priv *priv = netdev_priv(dev);
	int ints = readl_relaxed(priv->base + RAW_PMU_INT);

	writel_relaxed(ints, priv->base + RAW_PMU_INT);
	if (likely(ints & DEF_INT_MASK)) {
		hix5hd2_irq_disable(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	struct hix5hd2_desc *desc;
	dma_addr_t addr;
	u32 pos;

	/* software write pointer */
	pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
	if (unlikely(priv->tx_skb[pos])) {
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, addr)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	desc = priv->tx_bq.desc + pos;
	desc->buff_addr = cpu_to_le32(addr);
	priv->tx_skb[pos] = skb;
	desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
				(skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
				(skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);

	/* ensure desc updated */
	wmb();

	pos = dma_ring_incr(pos, TX_DESC_NUM);
	writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);

	dev->trans_start = jiffies;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	netdev_sent_queue(dev, skb->len);

	return NETDEV_TX_OK;
}

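The TX path uses the occupancy of the target slot as its ring-full test: if the skb at the write pointer has not been reclaimed yet, the queue is stopped and NETDEV_TX_BUSY makes the core requeue the skb (which also makes counting it in tx_dropped here debatable, since the packet is retried rather than lost); hix5hd2_xmit_reclaim() wakes the queue once slots drain. The stop/wake pattern in isolation (sketch):

/* Sketch of the queue stop/wake pattern used by the TX path above. */
static netdev_tx_t example_xmit_full(struct sk_buff *skb,
				     struct net_device *dev)
{
	if (0 /* ring full */) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* core requeues the skb */
	}
	/* ... post the skb; the completion handler later does:
	 * if (netif_queue_stopped(dev)) netif_wake_queue(dev);
	 */
	return NETDEV_TX_OK;
}
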
static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
{
	struct hix5hd2_desc *desc;
	dma_addr_t addr;
	int i;

	for (i = 0; i < RX_DESC_NUM; i++) {
		struct sk_buff *skb = priv->rx_skb[i];

		if (skb == NULL)
			continue;

		desc = priv->rx_fq.desc + i;
		addr = le32_to_cpu(desc->buff_addr);
		dma_unmap_single(priv->dev, addr,
				 MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_skb[i] = NULL;
	}

	for (i = 0; i < TX_DESC_NUM; i++) {
		struct sk_buff *skb = priv->tx_skb[i];

		if (skb == NULL)
			continue;

		desc = priv->tx_rq.desc + i;
		addr = le32_to_cpu(desc->buff_addr);
		dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		priv->tx_skb[i] = NULL;
	}
}

static int hix5hd2_net_open(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret < 0) {
		netdev_err(dev, "failed to enable clk %d\n", ret);
		return ret;
	}

	priv->phy = of_phy_connect(dev, priv->phy_node,
				   &hix5hd2_adjust_link, 0, priv->phy_mode);
	if (!priv->phy) {
		/* don't leak an enabled clock on failure */
		clk_disable_unprepare(priv->clk);
		return -ENODEV;
	}

	phy_start(priv->phy);
	hix5hd2_hw_init(priv);
	hix5hd2_rx_refill(priv);

	netdev_reset_queue(dev);
	netif_start_queue(dev);
	napi_enable(&priv->napi);

	hix5hd2_port_enable(priv);
	hix5hd2_irq_enable(priv);

	return 0;
}

static int hix5hd2_net_close(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);

	hix5hd2_port_disable(priv);
	hix5hd2_irq_disable(priv);
	napi_disable(&priv->napi);
	netif_stop_queue(dev);
	hix5hd2_free_dma_desc_rings(priv);

	if (priv->phy) {
		phy_stop(priv->phy);
		phy_disconnect(priv->phy);
	}

	clk_disable_unprepare(priv->clk);

	return 0;
}

static void hix5hd2_tx_timeout_task(struct work_struct *work)
{
	struct hix5hd2_priv *priv;

	priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
	hix5hd2_net_close(priv->netdev);
	hix5hd2_net_open(priv->netdev);
}

static void hix5hd2_net_timeout(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);

	schedule_work(&priv->tx_timeout_task);
}

static const struct net_device_ops hix5hd2_netdev_ops = {
	.ndo_open		= hix5hd2_net_open,
	.ndo_stop		= hix5hd2_net_close,
	.ndo_start_xmit		= hix5hd2_net_xmit,
	.ndo_tx_timeout		= hix5hd2_net_timeout,
	.ndo_set_mac_address	= hix5hd2_net_set_mac_address,
};

static int hix5hd2_get_settings(struct net_device *net_dev,
				struct ethtool_cmd *cmd)
{
	struct hix5hd2_priv *priv = netdev_priv(net_dev);

	if (!priv->phy)
		return -ENODEV;

	return phy_ethtool_gset(priv->phy, cmd);
}

static int hix5hd2_set_settings(struct net_device *net_dev,
				struct ethtool_cmd *cmd)
{
	struct hix5hd2_priv *priv = netdev_priv(net_dev);

	if (!priv->phy)
		return -ENODEV;

	return phy_ethtool_sset(priv->phy, cmd);
}

static const struct ethtool_ops hix5hd2_ethtools_ops = {
	.get_link		= ethtool_op_get_link,
	.get_settings		= hix5hd2_get_settings,
	.set_settings		= hix5hd2_set_settings,
};

static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
{
	struct hix5hd2_priv *priv = bus->priv;
	void __iomem *base = priv->base;
	int i, timeout = 10000;

	for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
		if (i == timeout)
			return -ETIMEDOUT;
		usleep_range(10, 20);
	}

	return 0;
}

static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct hix5hd2_priv *priv = bus->priv;
	void __iomem *base = priv->base;
	int val, ret;

	ret = hix5hd2_mdio_wait_ready(bus);
	if (ret < 0)
		goto out;

	writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
	ret = hix5hd2_mdio_wait_ready(bus);
	if (ret < 0)
		goto out;

	val = readl_relaxed(base + MDIO_RDATA_STATUS);
	if (val & MDIO_R_VALID) {
		dev_err(bus->parent, "SMI bus read not valid\n");
		ret = -ENODEV;
		goto out;
	}

	val = readl_relaxed(priv->base + MDIO_SINGLE_DATA);
	ret = (val >> 16) & 0xFFFF;
out:
	return ret;
}

static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct hix5hd2_priv *priv = bus->priv;
	void __iomem *base = priv->base;
	int ret;

	ret = hix5hd2_mdio_wait_ready(bus);
	if (ret < 0)
		goto out;

	writel_relaxed(val, base + MDIO_SINGLE_DATA);
	writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
	ret = hix5hd2_mdio_wait_ready(bus);
out:
	return ret;
}

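Both accessors implement the usual polled MDIO command interface: wait for the previous command's MDIO_START bit to clear, issue the command, wait again, and for reads check MDIO_RDATA_STATUS before fetching the data register. PHY drivers reach them through the mii_bus registered in probe; a hedged usage sketch:

/* Illustrative only: reading a PHY identifier register through the
 * mii_bus that probe() registers below; mdiobus_read() takes the bus
 * lock and ends up in hix5hd2_mdio_read().
 */
static int example_read_phy_id(struct mii_bus *bus, int phy_addr)
{
	return mdiobus_read(bus, phy_addr, MII_PHYSID1);
}
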
static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
{
	int i;

	for (i = 0; i < QUEUE_NUMS; i++) {
		if (priv->pool[i].desc) {
			dma_free_coherent(priv->dev, priv->pool[i].size,
					  priv->pool[i].desc,
					  priv->pool[i].phys_addr);
			priv->pool[i].desc = NULL;
		}
	}
}

static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
{
	struct device *dev = priv->dev;
	struct hix5hd2_desc *virt_addr;
	dma_addr_t phys_addr;
	int size, i;

	priv->rx_fq.count = RX_DESC_NUM;
	priv->rx_bq.count = RX_DESC_NUM;
	priv->tx_bq.count = TX_DESC_NUM;
	priv->tx_rq.count = TX_DESC_NUM;

	for (i = 0; i < QUEUE_NUMS; i++) {
		size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
		virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
					       GFP_KERNEL);
		if (virt_addr == NULL)
			goto error_free_pool;

		memset(virt_addr, 0, size);
		priv->pool[i].size = size;
		priv->pool[i].desc = virt_addr;
		priv->pool[i].phys_addr = phys_addr;
	}
	return 0;

error_free_pool:
	hix5hd2_destroy_hw_desc_queue(priv);

	return -ENOMEM;
}

static int hix5hd2_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct net_device *ndev;
	struct hix5hd2_priv *priv;
	struct resource *res;
	struct mii_bus *bus;
	const char *mac_addr;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out_free_netdev;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->ctrl_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->ctrl_base)) {
		ret = PTR_ERR(priv->ctrl_base);
		goto out_free_netdev;
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		netdev_err(ndev, "failed to get clk\n");
		ret = -ENODEV;
		goto out_free_netdev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret < 0) {
		netdev_err(ndev, "failed to enable clk %d\n", ret);
		goto out_free_netdev;
	}

	bus = mdiobus_alloc();
	if (bus == NULL) {
		ret = -ENOMEM;
		goto out_disable_clk;
	}

	bus->priv = priv;
	bus->name = "hix5hd2_mii_bus";
	bus->read = hix5hd2_mdio_read;
	bus->write = hix5hd2_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
	priv->bus = bus;

	ret = of_mdiobus_register(bus, node);
	if (ret)
		goto err_free_mdio;

	priv->phy_mode = of_get_phy_mode(node);
	if (priv->phy_mode < 0) {
		netdev_err(ndev, "no phy-mode found\n");
		ret = -EINVAL;
		goto err_mdiobus;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "no phy-handle found\n");
		ret = -EINVAL;
		goto err_mdiobus;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq <= 0) {
		netdev_err(ndev, "no irq resource\n");
		ret = -EINVAL;
		goto out_phy_node;
	}

	ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto out_phy_node;
	}

	mac_addr = of_get_mac_address(node);
	if (mac_addr)
		ether_addr_copy(ndev->dev_addr, mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		netdev_warn(ndev, "using random MAC address %pM\n",
			    ndev->dev_addr);
	}

	INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
	ndev->watchdog_timeo = 6 * HZ;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hix5hd2_netdev_ops;
	ndev->ethtool_ops = &hix5hd2_ethtools_ops;
	SET_NETDEV_DEV(ndev, dev);

	ret = hix5hd2_init_hw_desc_queue(priv);
	if (ret)
		goto out_phy_node;

	netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
	ret = register_netdev(priv->netdev);
	if (ret) {
		netdev_err(ndev, "register_netdev failed\n");
		goto out_destroy_queue;
	}

	/* the clock is re-enabled from ndo_open */
	clk_disable_unprepare(priv->clk);

	return ret;

out_destroy_queue:
	netif_napi_del(&priv->napi);
	hix5hd2_destroy_hw_desc_queue(priv);
out_phy_node:
	of_node_put(priv->phy_node);
err_mdiobus:
	mdiobus_unregister(bus);
err_free_mdio:
	mdiobus_free(bus);
out_disable_clk:
	clk_disable_unprepare(priv->clk);
out_free_netdev:
	free_netdev(ndev);

	return ret;
}

static int hix5hd2_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hix5hd2_priv *priv = netdev_priv(ndev);

	netif_napi_del(&priv->napi);
	unregister_netdev(ndev);
	mdiobus_unregister(priv->bus);
	mdiobus_free(priv->bus);

	hix5hd2_destroy_hw_desc_queue(priv);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id hix5hd2_of_match[] = {
	{ .compatible = "hisilicon,hix5hd2-gmac", },
	{},
};

MODULE_DEVICE_TABLE(of, hix5hd2_of_match);

static struct platform_driver hix5hd2_dev_driver = {
	.driver = {
		.name = "hix5hd2-gmac",
		.of_match_table = hix5hd2_of_match,
	},
	.probe = hix5hd2_dev_probe,
	.remove = hix5hd2_dev_remove,
};

module_platform_driver(hix5hd2_dev_driver);

MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hix5hd2-gmac");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 95837b99a464..85a3866459cf 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -63,8 +63,8 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		cmd->duplex = port->full_duplex == 1 ?
 			      DUPLEX_FULL : DUPLEX_HALF;
 	} else {
-		speed = ~0;
-		cmd->duplex = -1;
+		speed = SPEED_UNKNOWN;
+		cmd->duplex = DUPLEX_UNKNOWN;
 	}
 	ethtool_cmd_speed_set(cmd, speed);
 
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
 
 void ehea_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+	netdev->ethtool_ops = &ehea_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 538903bf13bc..a0b418e007a0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -28,6 +28,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/device.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
@@ -3273,7 +3274,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
 		return -EINVAL;
 	}
 
-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
 		ret = -ENOMEM;
 		dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3359,7 +3360,6 @@ out_kill_eq:
 
 out_free_ad:
 	list_del(&adapter->list);
-	kfree(adapter);
 
 out:
 	ehea_update_firmware_handles();
@@ -3386,7 +3386,6 @@ static int ehea_remove(struct platform_device *dev)
 	ehea_destroy_eq(adapter->neq);
 	ehea_remove_adapter_mr(adapter);
 	list_del(&adapter->list);
-	kfree(adapter);
 
 	ehea_update_firmware_handles();
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 9b03033bb557..a0820f72b25c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -103,12 +103,14 @@ out_nomem:
 
 static void hw_queue_dtor(struct hw_queue *queue)
 {
-	int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	int pages_per_kpage;
 	int i, nr_pages;
 
 	if (!queue || !queue->queue_pages)
 		return;
 
+	pages_per_kpage = PAGE_SIZE / queue->pagesize;
+
 	nr_pages = queue->queue_length / queue->pagesize;
 
 	for (i = 0; i < nr_pages; i += pages_per_kpage)
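The hw_queue_dtor() fix above addresses a classic ordering bug: the initializer dereferenced `queue->pagesize` before the `!queue` check could reject a NULL pointer. The safe shape is declare first, validate, then compute (sketch):

/* Sketch of the pattern the hunk above establishes. */
static void example_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;

	if (!queue || !queue->queue_pages)
		return;		/* validate before any dereference */

	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	/* ... */
}
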
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ae342fdb42c8..87bd953cc2ee 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
 		dev->commac.ops = &emac_commac_sg_ops;
 	} else
 		ndev->netdev_ops = &emac_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+	ndev->ethtool_ops = &emac_ethtool_ops;
 
 	netif_carrier_off(ndev);
 
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 25045ae07171..5727779a7df2 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 */
 	dev->netdev_ops = &ipg_netdev_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
-	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+	dev->ethtool_ops = &ipg_ethtool_ops;
 
 	rc = pci_request_regions(pdev, DRV_NAME);
 	if (rc)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index b56461ce674c..9d979d7debef 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netdev->hw_features |= NETIF_F_RXALL;
 
 	netdev->netdev_ops = &e100_netdev_ops;
-	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+	netdev->ethtool_ops = &e100_ethtool_ops;
 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 73a8aeefb92a..d50f78afb56d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -168,8 +168,8 @@ static int e1000_get_settings(struct net_device *netdev,
 	else
 		ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
@@ -1460,7 +1460,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			 * enough time to complete the receives, if it's
 			 * exceeded, break and error off
 			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		} while (good_cnt < 64 && time_after(time + 20, jiffies));
+
 		if (good_cnt != 64) {
 			ret_val = 13;	/* ret_val is the same as mis-compare */
 			break;
@@ -1905,5 +1906,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+	netdev->ethtool_ops = &e1000_ethtool_ops;
 }
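The loopback-test change above is worth a note: a raw `jiffies < (time + 20)` comparison inverts its meaning when jiffies wraps, while the time_after()/time_before() macros are wrap-safe. The idiom in isolation (sketch, with a hypothetical poll_done() helper):

/* Sketch: wrap-safe jiffies deadline, mirroring the e1000 fix above. */
static bool example_poll_until(unsigned long timeout_ticks)
{
	unsigned long deadline = jiffies + timeout_ticks;

	while (time_before(jiffies, deadline)) {
		if (poll_done())	/* hypothetical helper */
			return true;
	}
	return false;
}
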
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c1d3fdb296a0..e9b07ccc0eba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4877,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
 	 * since the test for a multicast frame will test positive on
 	 * a broadcast frame.
 	 */
-	if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+	if (is_broadcast_ether_addr(mac_addr))
 		/* Broadcast packet */
 		stats->bprc++;
-	else if (*mac_addr & 0x01)
+	else if (is_multicast_ether_addr(mac_addr))
 		/* Multicast packet */
 		stats->mprc++;
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 27058dfe418b..660971f304b2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3105,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	 */
 	tx_ring = adapter->tx_ring;
 
-	if (unlikely(skb->len <= 0)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
 	 * packets may get corrupted during padding by HW.
 	 * To WA this issue, pad all small packets manually.
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a5f6b11d6992..08f22f348800 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1365,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
 	.setup_led		= e1000e_setup_led_generic,
 	.config_collision_dist	= e1000e_config_collision_dist_generic,
 	.rar_set		= e1000e_rar_set_generic,
+	.rar_get_count		= e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index e0aa7f1efb08..218481e509f9 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1896,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
 	.config_collision_dist	= e1000e_config_collision_dist_generic,
 	.read_mac_addr		= e1000_read_mac_addr_82571,
 	.rar_set		= e1000e_rar_set_generic,
+	.rar_get_count		= e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c5464a89..7785240a0da1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
 	u32 tx_hwtstamp_timeouts;
 
 	/* Rx */
-	bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
-			  int work_to_do) ____cacheline_aligned_in_smp;
-	void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
-			      gfp_t gfp);
+	bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+			 int work_to_do) ____cacheline_aligned_in_smp;
+	void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+			     gfp_t gfp);
 	struct e1000_ring *rx_ring;
 
 	u32 rx_int_delay;
@@ -391,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
  *	25MHz	46-bit	2^46 / 10^9 / 3600 = 19.55 hours
  */
 #define E1000_SYSTIM_OVERFLOW_PERIOD	(HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS	50
+#define E1000_82574_SYSTIM_EPSILON	(1ULL << 35ULL)
 
 /* hardware capability, feature, and workaround flags */
 #define FLAG_HAS_AMT	(1 << 0)
@@ -573,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
 
 #define er32(reg)	__er32(hw, E1000_##reg)
 
-/**
- * __ew32_prepare - prepare to write to MAC CSR register on certain parts
- * @hw: pointer to the HW structure
- *
- * When updating the MAC CSR registers, the Manageability Engine (ME) could
- * be accessing the registers at the same time. Normally, this is handled in
- * h/w by an arbiter but on some parts there is a bug that acknowledges Host
- * accesses later than it should which could result in the register to have
- * an incorrect value. Workaround this by checking the FWSM register which
- * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
- * and try again a number of times.
- **/
-static inline s32 __ew32_prepare(struct e1000_hw *hw)
-{
-	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
-
-	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
-		udelay(50);
-
-	return i;
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
-	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-		__ew32_prepare(hw);
-
-	writel(val, hw->hw_addr + reg);
-}
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
 
 #define ew32(reg, val)	__ew32(hw, E1000_##reg, (val))
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250bc1b99..815e26c6d34b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -159,8 +159,8 @@ static int e1000_get_settings(struct net_device *netdev,
 		ecmd->transceiver = XCVR_EXTERNAL;
 	}
 
-	speed = -1;
-	ecmd->duplex = -1;
+	speed = SPEED_UNKNOWN;
+	ecmd->duplex = DUPLEX_UNKNOWN;
 
 	if (netif_running(netdev)) {
 		if (netif_carrier_ok(netdev)) {
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
 		}
 	} else if (!pm_runtime_suspended(netdev->dev.parent)) {
 		u32 status = er32(STATUS);
+
 		if (status & E1000_STATUS_LU) {
 			if (status & E1000_STATUS_SPEED_1000)
 				speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
 			      reg + (offset << 2), val,
 			      (test[pat] & write & mask));
 			*data = reg;
-			return 1;
+			return true;
 		}
 	}
-	return 0;
+	return false;
 }
 
 static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
 			      int reg, u32 mask, u32 write)
 {
 	u32 val;
+
 	__ew32(&adapter->hw, reg, write & mask);
 	val = __er32(&adapter->hw, reg);
 	if ((write & mask) != (val & mask)) {
 		e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
 		      reg, (val & mask), (write & mask));
 		*data = reg;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 	*data = 0;
 	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
+
 		hw->mac.serdes_has_link = false;
 
 		/* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+	netdev->ethtool_ops = &e1000_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 6b3de5f39a97..72f5475c4b90 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -469,8 +469,9 @@ struct e1000_mac_operations {
 	s32  (*setup_led)(struct e1000_hw *);
 	void (*write_vfta)(struct e1000_hw *, u32, u32);
 	void (*config_collision_dist)(struct e1000_hw *);
-	void (*rar_set)(struct e1000_hw *, u8 *, u32);
+	int  (*rar_set)(struct e1000_hw *, u8 *, u32);
 	s32  (*read_mac_addr)(struct e1000_hw *);
+	u32  (*rar_get_count)(struct e1000_hw *);
 };
 
 /* When to use various PHY register access functions:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd4246d71..8894ab8ed6bd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -139,8 +139,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
@@ -704,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
 		mac->ops.setup_physical_interface =
 		    e1000_setup_copper_link_pch_lpt;
+		mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
 	}
 
 	/* Enable PCS Lock-loss workaround for ICH8 */
@@ -1334,6 +1336,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	if (((hw->mac.type == e1000_pch2lan) ||
 	     (hw->mac.type == e1000_pch_lpt)) && link) {
 		u32 reg;
+
 		reg = er32(STATUS);
 		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
 			u16 emi_addr;
@@ -1634,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
 	u32 fwsm;
 
 	fwsm = er32(FWSM);
-	return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
-		((fwsm & E1000_FWSM_MODE_MASK) ==
-		 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       ((fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
 }
 
 /**
@@ -1667,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
  * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
  * Use SHRA[0-3] in place of those reserved for ME.
  **/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 
@@ -1689,7 +1692,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 		e1e_flush();
 		ew32(RAH(index), rar_high);
 		e1e_flush();
-		return;
+		return 0;
 	}
 
 	/* RAR[1-6] are owned by manageability.  Skip those and program the
@@ -1712,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 	/* verify the register updates */
 	if ((er32(SHRAL(index - 1)) == rar_low) &&
 	    (er32(SHRAH(index - 1)) == rar_high))
-		return;
+		return 0;
 
 	e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
 	      (index - 1), er32(FWSM));
@@ -1720,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 
 out:
 	e_dbg("Failed to write receive address at index %d\n", index);
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ * @hw: pointer to the HW structure
+ *
+ * Get the number of available receive registers that the Host can
+ * program. SHRA[0-10] are the shared receive address registers
+ * that are shared between the Host and manageability engine (ME).
+ * ME can reserve any number of addresses and the host needs to be
+ * able to tell how many available registers it has access to.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+	u32 wlock_mac;
+	u32 num_entries;
+
+	wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+	wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+	switch (wlock_mac) {
+	case 0:
+		/* All SHRA[0..10] and RAR[0] available */
+		num_entries = hw->mac.rar_entry_count;
+		break;
+	case 1:
+		/* Only RAR[0] available */
+		num_entries = 1;
+		break;
+	default:
+		/* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+		num_entries = wlock_mac + 1;
+		break;
+	}
+
+	return num_entries;
 }
 
 /**
@@ -1733,7 +1773,7 @@ out:
  * contain the MAC address.  SHRA[0-10] are the shared receive address
  * registers that are shared between the Host and manageability engine (ME).
 **/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 	u32 wlock_mac;
@@ -1755,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
 		e1e_flush();
 		ew32(RAH(index), rar_high);
 		e1e_flush();
-		return;
+		return 0;
 	}
 
 	/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1787,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
 			/* verify the register updates */
 			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
 			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
-				return;
+				return 0;
 		}
 	}
 
 out:
 	e_dbg("Failed to write receive address at index %d\n", index);
+	return -E1000_ERR_CONFIG;
 }
 
 /**
@@ -4976,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
 	/* id_led_init dependent on mac type */
 	.config_collision_dist	= e1000e_config_collision_dist_generic,
 	.rar_set		= e1000e_rar_set_generic,
+	.rar_get_count		= e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations ich8_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index baa0a466d1d0..8c386f3a15eb 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -211,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
 	return 0;
 }
 
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+	return hw->mac.rar_entry_count;
+}
+
 /**
  * e1000e_rar_set_generic - Set receive address register
  * @hw: pointer to the HW structure
@@ -220,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
  * Sets the receive address array register at index to the address passed
  * in by addr.
  **/
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 
@@ -244,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
 	e1e_flush();
 	ew32(RAH(index), rar_high);
 	e1e_flush();
+
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 4e81c2825b7a..0513d90cdeea 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -61,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
 void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
 
 void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
 void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
 
 #endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386add04..201cc93f3625 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -124,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 };
 
 /**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time. Normally, this is handled in
+ * h/w by an arbiter but on some parts there is a bug that acknowledges Host
+ * accesses later than it should which could result in the register to have
+ * an incorrect value. Workaround this by checking the FWSM register which
+ * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
+ * and try again a number of times.
+ **/
+s32 __ew32_prepare(struct e1000_hw *hw)
+{
+	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+		udelay(50);
+
+	return i;
+}
+
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		__ew32_prepare(hw);
+
+	writel(val, hw->hw_addr + reg);
+}
+
+/**
  * e1000_regdump - register printout routine
  * @hw: pointer to the HW structure
  * @reginfo: pointer to the register info table
@@ -599,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
 
 	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
 		u32 rctl = er32(RCTL);
+
 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
 		e_err("ME firmware caused invalid RDT - resetting\n");
 		schedule_work(&adapter->reset_task);
@@ -615,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
 
 	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
 		u32 tctl = er32(TCTL);
+
 		ew32(TCTL, tctl & ~E1000_TCTL_EN);
 		e_err("ME firmware caused invalid TDT - resetting\n");
 		schedule_work(&adapter->reset_task);
@@ -1198,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
 	       (count < tx_ring->count)) {
 		bool cleaned = false;
+
 		rmb();	/* read buffer_info after eop_desc */
 		for (; !cleaned; count++) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
 	    adapter->flags & FLAG_RX_NEEDS_RESTART) {
 		/* disable receives */
 		u32 rctl = er32(RCTL);
+
 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
 		adapter->flags |= FLAG_RESTART_NOW;
 	}
@@ -1960,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
 	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
 	if (hw->mac.type == e1000_82574) {
 		u32 rfctl = er32(RFCTL);
+
 		rfctl |= E1000_RFCTL_ACK_DIS;
 		ew32(RFCTL, rfctl);
 	}
@@ -2204,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
 
 	if (adapter->msix_entries) {
 		int i;
+
 		for (i = 0; i < adapter->num_vectors; i++)
 			synchronize_irq(adapter->msix_entries[i].vector);
 	} else {
@@ -2921,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
 	if (adapter->flags2 & FLAG2_DMA_BURST) {
 		u32 txdctl = er32(TXDCTL(0));
+
 		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2925 E1000_TXDCTL_WTHRESH); 2962 E1000_TXDCTL_WTHRESH);
2926 /* set up some performance related parameters to encourage the 2963 /* set up some performance related parameters to encourage the
@@ -3239,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3239 3276
3240 if (adapter->flags & FLAG_IS_ICH) { 3277 if (adapter->flags & FLAG_IS_ICH) {
3241 u32 rxdctl = er32(RXDCTL(0)); 3278 u32 rxdctl = er32(RXDCTL(0));
3279
3242 ew32(RXDCTL(0), rxdctl | 0x3); 3280 ew32(RXDCTL(0), rxdctl | 0x3);
3243 } 3281 }
3244 3282
@@ -3303,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3303{ 3341{
3304 struct e1000_adapter *adapter = netdev_priv(netdev); 3342 struct e1000_adapter *adapter = netdev_priv(netdev);
3305 struct e1000_hw *hw = &adapter->hw; 3343 struct e1000_hw *hw = &adapter->hw;
3306 unsigned int rar_entries = hw->mac.rar_entry_count; 3344 unsigned int rar_entries;
3307 int count = 0; 3345 int count = 0;
3308 3346
3347 rar_entries = hw->mac.ops.rar_get_count(hw);
3348
3309 /* save a rar entry for our hardware address */ 3349 /* save a rar entry for our hardware address */
3310 rar_entries--; 3350 rar_entries--;
3311 3351
@@ -3324,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3324 * combining 3364 * combining
3325 */ 3365 */
3326 netdev_for_each_uc_addr(ha, netdev) { 3366 netdev_for_each_uc_addr(ha, netdev) {
3367 int rval;
3368
3327 if (!rar_entries) 3369 if (!rar_entries)
3328 break; 3370 break;
3329 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); 3371 rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3372 if (rval < 0)
3373 return -ENOMEM;
3330 count++; 3374 count++;
3331 } 3375 }
3332 } 3376 }
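
The reworked unicast-list writer asks the MAC ops for the entry count at runtime and now bails out when rar_set reports a failure, since it returns int instead of void. A hedged, self-contained sketch of that top-down table fill — the fake table, rar_set_stub, and the error convention are illustrative stand-ins, not driver API:

#include <stdio.h>

#define RAR_COUNT 8                     /* stand-in for rar_get_count() */

static char rar_table[RAR_COUNT][18];   /* fake receive-address registers */

/* Stand-in for hw->mac.ops.rar_set(); returns 0 on success, <0 on error. */
static int rar_set_stub(const char *addr, unsigned int index)
{
	if (index >= RAR_COUNT)
		return -1;
	snprintf(rar_table[index], sizeof(rar_table[index]), "%s", addr);
	return 0;
}

int main(void)
{
	const char *uc_list[] = { "02:00:00:00:00:01", "02:00:00:00:00:02" };
	unsigned int rar_entries = RAR_COUNT;
	size_t n;

	rar_entries--;	/* save one entry for the device's own MAC address */

	for (n = 0; n < sizeof(uc_list) / sizeof(uc_list[0]); n++) {
		if (!rar_entries)
			break;
		if (rar_set_stub(uc_list[n], rar_entries--) < 0)
			return 1;	/* the driver returns -ENOMEM here */
	}
	printf("programmed %zu filters, %u RAR entries left\n", n, rar_entries);
	return 0;
}

Filling from the highest index downward keeps the low entries free, since entry 0 conventionally holds the primary MAC address.
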
@@ -4085,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4085 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4129 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4086 cc); 4130 cc);
4087 struct e1000_hw *hw = &adapter->hw; 4131 struct e1000_hw *hw = &adapter->hw;
4088 cycle_t systim; 4132 cycle_t systim, systim_next;
4089 4133
4090 /* latch SYSTIMH on read of SYSTIML */ 4134 /* latch SYSTIMH on read of SYSTIML */
4091 systim = (cycle_t)er32(SYSTIML); 4135 systim = (cycle_t)er32(SYSTIML);
4092 systim |= (cycle_t)er32(SYSTIMH) << 32; 4136 systim |= (cycle_t)er32(SYSTIMH) << 32;
4093 4137
4138 if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
4139 u64 incvalue, time_delta, rem, temp;
4140 int i;
4141
 4142 /* Errata for 82574/82583: possible bad bits read from
 4143 * SYSTIMH/L. Check that the time is incrementing at a
 4144 * reasonable rate and is a multiple of incvalue.
 4145 */
4146 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4147 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4148 /* latch SYSTIMH on read of SYSTIML */
4149 systim_next = (cycle_t)er32(SYSTIML);
4150 systim_next |= (cycle_t)er32(SYSTIMH) << 32;
4151
4152 time_delta = systim_next - systim;
4153 temp = time_delta;
4154 rem = do_div(temp, incvalue);
4155
4156 systim = systim_next;
4157
4158 if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
4159 (rem == 0))
4160 break;
4161 }
4162 }
4094 return systim; 4163 return systim;
4095} 4164}
4096 4165
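
The 82574/82583 errata loop above re-reads the counter until two consecutive samples differ by a small, exact multiple of the timer increment. A minimal sketch of that validation, with a software counter standing in for SYSTIML/SYSTIMH and purely illustrative constants for the increment, epsilon, and re-read bound:

#include <stdint.h>
#include <stdio.h>

#define INCVALUE    24          /* stand-in for the TIMINCA increment */
#define EPSILON     (12 * 24)   /* max plausible delta between reads */
#define MAX_REREADS 50

static uint64_t fake_systim;

/* Fake latched 64-bit counter read; the first call returns a corrupted value. */
static uint64_t read_systim(int *calls)
{
	fake_systim += INCVALUE;
	if (++(*calls) == 1)
		return fake_systim | (1ull << 40);  /* inject a bad bit */
	return fake_systim;
}

int main(void)
{
	int calls = 0, i;
	uint64_t systim = read_systim(&calls);

	for (i = 0; i < MAX_REREADS; i++) {
		uint64_t next = read_systim(&calls);
		uint64_t delta = next - systim;

		systim = next;
		if (delta < EPSILON && (delta % INCVALUE) == 0)
			break;          /* two sane consecutive reads */
	}
	printf("accepted systim after %d re-reads: %llu\n", i,
	       (unsigned long long)systim);
	return 0;
}

A corrupted first sample makes the unsigned delta huge, so the check fails and the loop keeps the later, consistent pair — the driver's approach with do_div(), written here with the % operator.
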
@@ -4491,7 +4560,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
4491 e1000_get_phy_info(hw); 4560 e1000_get_phy_info(hw);
4492 4561
4493 /* Enable EEE on 82579 after link up */ 4562 /* Enable EEE on 82579 after link up */
4494 if (hw->phy.type == e1000_phy_82579) 4563 if (hw->phy.type >= e1000_phy_82579)
4495 e1000_set_eee_pchlan(hw); 4564 e1000_set_eee_pchlan(hw);
4496} 4565}
4497 4566
@@ -4695,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4695 /* Correctable ECC Errors */ 4764 /* Correctable ECC Errors */
4696 if (hw->mac.type == e1000_pch_lpt) { 4765 if (hw->mac.type == e1000_pch_lpt) {
4697 u32 pbeccsts = er32(PBECCSTS); 4766 u32 pbeccsts = er32(PBECCSTS);
4767
4698 adapter->corr_errors += 4768 adapter->corr_errors +=
4699 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 4769 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4700 adapter->uncorr_errors += 4770 adapter->uncorr_errors +=
@@ -4808,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
4808 (adapter->flags & FLAG_RESTART_NOW)) { 4878 (adapter->flags & FLAG_RESTART_NOW)) {
4809 struct e1000_hw *hw = &adapter->hw; 4879 struct e1000_hw *hw = &adapter->hw;
4810 u32 rctl = er32(RCTL); 4880 u32 rctl = er32(RCTL);
4881
4811 ew32(RCTL, rctl | E1000_RCTL_EN); 4882 ew32(RCTL, rctl | E1000_RCTL_EN);
4812 adapter->flags &= ~FLAG_RESTART_NOW; 4883 adapter->flags &= ~FLAG_RESTART_NOW;
4813 } 4884 }
@@ -4930,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4930 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 5001 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4931 !txb2b) { 5002 !txb2b) {
4932 u32 tarc0; 5003 u32 tarc0;
5004
4933 tarc0 = er32(TARC(0)); 5005 tarc0 = er32(TARC(0));
4934 tarc0 &= ~SPEED_MODE_BIT; 5006 tarc0 &= ~SPEED_MODE_BIT;
4935 ew32(TARC(0), tarc0); 5007 ew32(TARC(0), tarc0);
@@ -5170,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5170 __be16 protocol; 5242 __be16 protocol;
5171 5243
5172 if (skb->ip_summed != CHECKSUM_PARTIAL) 5244 if (skb->ip_summed != CHECKSUM_PARTIAL)
5173 return 0; 5245 return false;
5174 5246
5175 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 5247 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5176 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 5248 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5215 i = 0; 5287 i = 0;
5216 tx_ring->next_to_use = i; 5288 tx_ring->next_to_use = i;
5217 5289
5218 return 1; 5290 return true;
5219} 5291}
5220 5292
5221static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 5293static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6209 e1e_wphy(&adapter->hw, BM_WUS, ~0); 6281 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6210 } else { 6282 } else {
6211 u32 wus = er32(WUS); 6283 u32 wus = er32(WUS);
6284
6212 if (wus) { 6285 if (wus) {
6213 e_info("MAC Wakeup cause - %s\n", 6286 e_info("MAC Wakeup cause - %s\n",
6214 wus & E1000_WUS_EX ? "Unicast Packet" : 6287 wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
7027 .resume = e1000_io_resume, 7100 .resume = e1000_io_resume,
7028}; 7101};
7029 7102
7030static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { 7103static const struct pci_device_id e1000_pci_tbl[] = {
7031 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 7104 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7032 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 7105 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7033 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 7106 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7217,7 @@ static struct pci_driver e1000_driver = {
7144static int __init e1000_init_module(void) 7217static int __init e1000_init_module(void)
7145{ 7218{
7146 int ret; 7219 int ret;
7220
7147 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 7221 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7148 e1000e_driver_version); 7222 e1000e_driver_version);
7149 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); 7223 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f04bff..b1f212b7baf7 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
398 /* Loop to allow for up to whole page write of eeprom */ 398 /* Loop to allow for up to whole page write of eeprom */
399 while (widx < words) { 399 while (widx < words) {
400 u16 word_out = data[widx]; 400 u16 word_out = data[widx];
401
401 word_out = (word_out >> 8) | (word_out << 8); 402 word_out = (word_out >> 8) | (word_out << 8);
402 e1000_shift_out_eec_bits(hw, word_out, 16); 403 e1000_shift_out_eec_bits(hw, word_out, 16);
403 widx++; 404 widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3249c8..aa1923f7ebdd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
436 436
437 if (num_IntMode > bd) { 437 if (num_IntMode > bd) {
438 unsigned int int_mode = IntMode[bd]; 438 unsigned int int_mode = IntMode[bd];
439
439 e1000_validate_option(&int_mode, &opt, adapter); 440 e1000_validate_option(&int_mode, &opt, adapter);
440 adapter->int_mode = int_mode; 441 adapter->int_mode = int_mode;
441 } else { 442 } else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
457 458
458 if (num_SmartPowerDownEnable > bd) { 459 if (num_SmartPowerDownEnable > bd) {
459 unsigned int spd = SmartPowerDownEnable[bd]; 460 unsigned int spd = SmartPowerDownEnable[bd];
461
460 e1000_validate_option(&spd, &opt, adapter); 462 e1000_validate_option(&spd, &opt, adapter);
461 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) 463 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
462 adapter->flags |= FLAG_SMART_POWER_DOWN; 464 adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
473 475
474 if (num_CrcStripping > bd) { 476 if (num_CrcStripping > bd) {
475 unsigned int crc_stripping = CrcStripping[bd]; 477 unsigned int crc_stripping = CrcStripping[bd];
478
476 e1000_validate_option(&crc_stripping, &opt, adapter); 479 e1000_validate_option(&crc_stripping, &opt, adapter);
477 if (crc_stripping == OPTION_ENABLED) { 480 if (crc_stripping == OPTION_ENABLED) {
478 adapter->flags2 |= FLAG2_CRC_STRIPPING; 481 adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
495 498
496 if (num_KumeranLockLoss > bd) { 499 if (num_KumeranLockLoss > bd) {
497 unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; 500 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
501
498 e1000_validate_option(&kmrn_lock_loss, &opt, adapter); 502 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
499 enabled = kmrn_lock_loss; 503 enabled = kmrn_lock_loss;
500 } 504 }
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc98bf30..b2005e13fb01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2896 (hw->phy.addr == 2) && 2896 (hw->phy.addr == 2) &&
2897 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { 2897 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
2898 u16 data2 = 0x7EFF; 2898 u16 data2 = 0x7EFF;
2899
2899 ret_val = e1000_access_phy_debug_regs_hv(hw, 2900 ret_val = e1000_access_phy_debug_regs_hv(hw,
2900 (1 << 6) | 0x3, 2901 (1 << 6) | 0x3,
2901 &data2, false); 2902 &data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b4393a6c..65985846345d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -72,6 +72,7 @@
72#define I40E_MIN_NUM_DESCRIPTORS 64 72#define I40E_MIN_NUM_DESCRIPTORS 64
73#define I40E_MIN_MSIX 2 73#define I40E_MIN_MSIX 2
74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ 74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
75#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
75#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ 76#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
76#define I40E_DEFAULT_QUEUES_PER_VF 4 77#define I40E_DEFAULT_QUEUES_PER_VF 4
77#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ 78#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
@@ -97,10 +98,6 @@
97#define STRINGIFY(foo) #foo 98#define STRINGIFY(foo) #foo
98#define XSTRINGIFY(bar) STRINGIFY(bar) 99#define XSTRINGIFY(bar) STRINGIFY(bar)
99 100
100#ifndef ARCH_HAS_PREFETCH
101#define prefetch(X)
102#endif
103
104#define I40E_RX_DESC(R, i) \ 101#define I40E_RX_DESC(R, i) \
105 ((ring_is_16byte_desc_enabled(R)) \ 102 ((ring_is_16byte_desc_enabled(R)) \
106 ? (union i40e_32byte_rx_desc *) \ 103 ? (union i40e_32byte_rx_desc *) \
@@ -157,11 +154,23 @@ struct i40e_lump_tracking {
157#define I40E_FDIR_BUFFER_FULL_MARGIN 10 154#define I40E_FDIR_BUFFER_FULL_MARGIN 10
158#define I40E_FDIR_BUFFER_HEAD_ROOM 200 155#define I40E_FDIR_BUFFER_HEAD_ROOM 200
159 156
157enum i40e_fd_stat_idx {
158 I40E_FD_STAT_ATR,
159 I40E_FD_STAT_SB,
160 I40E_FD_STAT_PF_COUNT
161};
162#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
163#define I40E_FD_ATR_STAT_IDX(pf_id) \
164 (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
165#define I40E_FD_SB_STAT_IDX(pf_id) \
166 (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
167
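
These macros lay out the flow-director statistic counters as a dense per-PF array: each PF owns I40E_FD_STAT_PF_COUNT consecutive slots, ATR first and sideband second. A tiny standalone check of the arithmetic, mirroring the macros above outside the driver:

#include <stdio.h>

enum fd_stat_idx { FD_STAT_ATR, FD_STAT_SB, FD_STAT_PF_COUNT };

#define FD_STAT_PF_IDX(pf_id)  ((pf_id) * FD_STAT_PF_COUNT)
#define FD_ATR_STAT_IDX(pf_id) (FD_STAT_PF_IDX(pf_id) + FD_STAT_ATR)
#define FD_SB_STAT_IDX(pf_id)  (FD_STAT_PF_IDX(pf_id) + FD_STAT_SB)

int main(void)
{
	int pf_id;

	for (pf_id = 0; pf_id < 4; pf_id++)
		printf("pf %d: atr slot %d, sb slot %d\n",
		       pf_id, FD_ATR_STAT_IDX(pf_id), FD_SB_STAT_IDX(pf_id));
	return 0;	/* e.g. pf 2 -> atr slot 4, sb slot 5 */
}
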
160struct i40e_fdir_filter { 168struct i40e_fdir_filter {
161 struct hlist_node fdir_node; 169 struct hlist_node fdir_node;
 162 /* filter input set */ 170 /* filter input set */
163 u8 flow_type; 171 u8 flow_type;
164 u8 ip4_proto; 172 u8 ip4_proto;
173 /* TX packet view of src and dst */
165 __be32 dst_ip[4]; 174 __be32 dst_ip[4];
166 __be32 src_ip[4]; 175 __be32 src_ip[4];
167 __be16 src_port; 176 __be16 src_port;
@@ -205,7 +214,6 @@ struct i40e_pf {
205 unsigned long state; 214 unsigned long state;
206 unsigned long link_check_timeout; 215 unsigned long link_check_timeout;
207 struct msix_entry *msix_entries; 216 struct msix_entry *msix_entries;
208 u16 num_msix_entries;
209 bool fc_autoneg_status; 217 bool fc_autoneg_status;
210 218
211 u16 eeprom_version; 219 u16 eeprom_version;
@@ -220,11 +228,14 @@ struct i40e_pf {
220 u16 rss_size; /* num queues in the RSS array */ 228 u16 rss_size; /* num queues in the RSS array */
221 u16 rss_size_max; /* HW defined max RSS queues */ 229 u16 rss_size_max; /* HW defined max RSS queues */
222 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ 230 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
231 u16 num_alloc_vsi; /* num VSIs this driver supports */
223 u8 atr_sample_rate; 232 u8 atr_sample_rate;
224 bool wol_en; 233 bool wol_en;
225 234
226 struct hlist_head fdir_filter_list; 235 struct hlist_head fdir_filter_list;
227 u16 fdir_pf_active_filters; 236 u16 fdir_pf_active_filters;
237 u16 fd_sb_cnt_idx;
238 u16 fd_atr_cnt_idx;
228 239
229#ifdef CONFIG_I40E_VXLAN 240#ifdef CONFIG_I40E_VXLAN
230 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; 241 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -266,6 +277,7 @@ struct i40e_pf {
266#ifdef CONFIG_I40E_VXLAN 277#ifdef CONFIG_I40E_VXLAN
267#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) 278#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
268#endif 279#endif
280#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
269 281
270 /* tracks features that get auto disabled by errors */ 282 /* tracks features that get auto disabled by errors */
271 u64 auto_disable_flags; 283 u64 auto_disable_flags;
@@ -300,7 +312,6 @@ struct i40e_pf {
300 u16 pf_seid; 312 u16 pf_seid;
301 u16 main_vsi_seid; 313 u16 main_vsi_seid;
302 u16 mac_seid; 314 u16 mac_seid;
303 struct i40e_aqc_get_switch_config_data *sw_config;
304 struct kobject *switch_kobj; 315 struct kobject *switch_kobj;
305#ifdef CONFIG_DEBUG_FS 316#ifdef CONFIG_DEBUG_FS
306 struct dentry *i40e_dbg_pf; 317 struct dentry *i40e_dbg_pf;
@@ -329,9 +340,7 @@ struct i40e_pf {
329 struct ptp_clock *ptp_clock; 340 struct ptp_clock *ptp_clock;
330 struct ptp_clock_info ptp_caps; 341 struct ptp_clock_info ptp_caps;
331 struct sk_buff *ptp_tx_skb; 342 struct sk_buff *ptp_tx_skb;
332 struct work_struct ptp_tx_work;
333 struct hwtstamp_config tstamp_config; 343 struct hwtstamp_config tstamp_config;
334 unsigned long ptp_tx_start;
335 unsigned long last_rx_ptp_check; 344 unsigned long last_rx_ptp_check;
336 spinlock_t tmreg_lock; /* Used to protect the device time registers. */ 345 spinlock_t tmreg_lock; /* Used to protect the device time registers. */
337 u64 ptp_base_adj; 346 u64 ptp_base_adj;
@@ -420,6 +429,7 @@ struct i40e_vsi {
420 struct i40e_q_vector **q_vectors; 429 struct i40e_q_vector **q_vectors;
421 int num_q_vectors; 430 int num_q_vectors;
422 int base_vector; 431 int base_vector;
432 bool irqs_ready;
423 433
424 u16 seid; /* HW index of this VSI (absolute index) */ 434 u16 seid; /* HW index of this VSI (absolute index) */
425 u16 id; /* VSI number */ 435 u16 id; /* VSI number */
@@ -540,6 +550,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
540 (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT); 550 (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
541} 551}
542 552
553/**
554 * i40e_get_fd_cnt_all - get the total FD filter space available
555 * @pf: pointer to the pf struct
556 **/
557static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
558{
559 return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
560}
561
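
i40e_get_fd_cnt_all is just the sum of the two filter pools: the guaranteed per-PF allocation plus the best-effort shared pool. A trivial check of that arithmetic with illustrative numbers (the real counts come from the HW capabilities):

#include <stdio.h>

int main(void)
{
	unsigned int fdir_shared_filter_count = 4096; /* illustrative */
	unsigned int fdir_pf_filter_count = 2048;     /* illustrative */

	/* Mirrors i40e_get_fd_cnt_all(): guaranteed + shared pool. */
	printf("total FD filter space: %u\n",
	       fdir_shared_filter_count + fdir_pf_filter_count);
	return 0;
}
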
543/* needed by i40e_ethtool.c */ 562/* needed by i40e_ethtool.c */
544int i40e_up(struct i40e_vsi *vsi); 563int i40e_up(struct i40e_vsi *vsi);
545void i40e_down(struct i40e_vsi *vsi); 564void i40e_down(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902bf249b..7a027499fc57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
33static void i40e_resume_aq(struct i40e_hw *hw); 33static void i40e_resume_aq(struct i40e_hw *hw);
34 34
35/** 35/**
36 * i40e_is_nvm_update_op - return true if this is an NVM update operation
37 * @desc: API request descriptor
38 **/
39static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
40{
41 return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
42 (desc->opcode == i40e_aqc_opc_nvm_update);
43}
44
45/**
36 * i40e_adminq_init_regs - Initialize AdminQ registers 46 * i40e_adminq_init_regs - Initialize AdminQ registers
37 * @hw: pointer to the hardware structure 47 * @hw: pointer to the hardware structure
38 * 48 *
@@ -281,8 +291,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
281 * 291 *
282 * Configure base address and length registers for the transmit queue 292 * Configure base address and length registers for the transmit queue
283 **/ 293 **/
284static void i40e_config_asq_regs(struct i40e_hw *hw) 294static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
285{ 295{
296 i40e_status ret_code = 0;
297 u32 reg = 0;
298
286 if (hw->mac.type == I40E_MAC_VF) { 299 if (hw->mac.type == I40E_MAC_VF) {
287 /* configure the transmit queue */ 300 /* configure the transmit queue */
288 wr32(hw, I40E_VF_ATQBAH1, 301 wr32(hw, I40E_VF_ATQBAH1,
@@ -291,6 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
291 lower_32_bits(hw->aq.asq.desc_buf.pa)); 304 lower_32_bits(hw->aq.asq.desc_buf.pa));
292 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries | 305 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
293 I40E_VF_ATQLEN1_ATQENABLE_MASK)); 306 I40E_VF_ATQLEN1_ATQENABLE_MASK));
307 reg = rd32(hw, I40E_VF_ATQBAL1);
294 } else { 308 } else {
295 /* configure the transmit queue */ 309 /* configure the transmit queue */
296 wr32(hw, I40E_PF_ATQBAH, 310 wr32(hw, I40E_PF_ATQBAH,
@@ -299,7 +313,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
299 lower_32_bits(hw->aq.asq.desc_buf.pa)); 313 lower_32_bits(hw->aq.asq.desc_buf.pa));
300 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries | 314 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
301 I40E_PF_ATQLEN_ATQENABLE_MASK)); 315 I40E_PF_ATQLEN_ATQENABLE_MASK));
316 reg = rd32(hw, I40E_PF_ATQBAL);
302 } 317 }
318
319 /* Check one register to verify that config was applied */
320 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
321 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
322
323 return ret_code;
303} 324}
304 325
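
Both the ASQ and, further down, the ARQ setup paths now read one base-address register back after programming it and fail queue init if the readback disagrees — a cheap way to detect a dead device or mis-mapped BAR before posting descriptors. A self-contained sketch of that write-then-verify idiom; the fake register file, offsets, and error code are all illustrative:

#include <stdint.h>
#include <stdio.h>

#define ERR_ADMIN_QUEUE  -5          /* illustrative error code */

static uint32_t regs[2];             /* fake BAH/BAL register pair */

static void wr32(int reg, uint32_t val) { regs[reg] = val; }
static uint32_t rd32(int reg)           { return regs[reg]; }

static int config_queue_regs(uint64_t desc_pa)
{
	wr32(0, (uint32_t)(desc_pa >> 32));   /* base address high */
	wr32(1, (uint32_t)desc_pa);           /* base address low */

	/* Check one register to verify that the config was applied. */
	if (rd32(1) != (uint32_t)desc_pa)
		return ERR_ADMIN_QUEUE;
	return 0;
}

int main(void)
{
	int ret = config_queue_regs(0x12345678abcdef00ull);

	printf("config %s\n", ret ? "failed" : "verified");
	return ret ? 1 : 0;
}

Checking a single register is enough here because a hung device typically reads back all-ones (or zero) on every offset.
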
305/** 326/**
@@ -308,8 +329,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
308 * 329 *
309 * Configure base address and length registers for the receive (event queue) 330 * Configure base address and length registers for the receive (event queue)
310 **/ 331 **/
311static void i40e_config_arq_regs(struct i40e_hw *hw) 332static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
312{ 333{
334 i40e_status ret_code = 0;
335 u32 reg = 0;
336
313 if (hw->mac.type == I40E_MAC_VF) { 337 if (hw->mac.type == I40E_MAC_VF) {
314 /* configure the receive queue */ 338 /* configure the receive queue */
315 wr32(hw, I40E_VF_ARQBAH1, 339 wr32(hw, I40E_VF_ARQBAH1,
@@ -318,6 +342,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
318 lower_32_bits(hw->aq.arq.desc_buf.pa)); 342 lower_32_bits(hw->aq.arq.desc_buf.pa));
319 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries | 343 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
320 I40E_VF_ARQLEN1_ARQENABLE_MASK)); 344 I40E_VF_ARQLEN1_ARQENABLE_MASK));
345 reg = rd32(hw, I40E_VF_ARQBAL1);
321 } else { 346 } else {
322 /* configure the receive queue */ 347 /* configure the receive queue */
323 wr32(hw, I40E_PF_ARQBAH, 348 wr32(hw, I40E_PF_ARQBAH,
@@ -326,10 +351,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
326 lower_32_bits(hw->aq.arq.desc_buf.pa)); 351 lower_32_bits(hw->aq.arq.desc_buf.pa));
327 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries | 352 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
328 I40E_PF_ARQLEN_ARQENABLE_MASK)); 353 I40E_PF_ARQLEN_ARQENABLE_MASK));
354 reg = rd32(hw, I40E_PF_ARQBAL);
329 } 355 }
330 356
331 /* Update tail in the HW to post pre-allocated buffers */ 357 /* Update tail in the HW to post pre-allocated buffers */
332 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 358 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
359
360 /* Check one register to verify that config was applied */
361 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
362 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
363
364 return ret_code;
333} 365}
334 366
335/** 367/**
@@ -377,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
377 goto init_adminq_free_rings; 409 goto init_adminq_free_rings;
378 410
379 /* initialize base registers */ 411 /* initialize base registers */
380 i40e_config_asq_regs(hw); 412 ret_code = i40e_config_asq_regs(hw);
413 if (ret_code)
414 goto init_adminq_free_rings;
381 415
382 /* success! */ 416 /* success! */
383 goto init_adminq_exit; 417 goto init_adminq_exit;
@@ -434,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
434 goto init_adminq_free_rings; 468 goto init_adminq_free_rings;
435 469
436 /* initialize base registers */ 470 /* initialize base registers */
437 i40e_config_arq_regs(hw); 471 ret_code = i40e_config_arq_regs(hw);
472 if (ret_code)
473 goto init_adminq_free_rings;
438 474
439 /* success! */ 475 /* success! */
440 goto init_adminq_exit; 476 goto init_adminq_exit;
@@ -577,14 +613,14 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
577 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); 613 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
578 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; 614 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
579 615
580 if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR || 616 if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
581 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
582 ret_code = I40E_ERR_FIRMWARE_API_VERSION; 617 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
583 goto init_adminq_free_arq; 618 goto init_adminq_free_arq;
584 } 619 }
585 620
586 /* pre-emptive resource lock release */ 621 /* pre-emptive resource lock release */
587 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); 622 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
623 hw->aq.nvm_busy = false;
588 624
589 ret_code = i40e_aq_set_hmc_resource_profile(hw, 625 ret_code = i40e_aq_set_hmc_resource_profile(hw,
590 I40E_HMC_PROFILE_DEFAULT, 626 I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
708 goto asq_send_command_exit; 744 goto asq_send_command_exit;
709 } 745 }
710 746
747 if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
748 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
749 status = I40E_ERR_NVM;
750 goto asq_send_command_exit;
751 }
752
711 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 753 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
712 if (cmd_details) { 754 if (cmd_details) {
713 *details = *cmd_details; 755 *details = *cmd_details;
@@ -835,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
835 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; 877 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
836 } 878 }
837 879
880 if (i40e_is_nvm_update_op(desc))
881 hw->aq.nvm_busy = true;
882
838 /* update the error if time out occurred */ 883 /* update the error if time out occurred */
839 if ((!cmd_completed) && 884 if ((!cmd_completed) &&
840 (!details->async && !details->postpone)) { 885 (!details->async && !details->postpone)) {
@@ -929,6 +974,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
929 e->msg_size); 974 e->msg_size);
930 } 975 }
931 976
977 if (i40e_is_nvm_update_op(&e->desc))
978 hw->aq.nvm_busy = false;
979
932 /* Restore the original datalen and buffer address in the desc, 980 /* Restore the original datalen and buffer address in the desc,
933 * FW updates datalen to indicate the event message 981 * FW updates datalen to indicate the event message
934 * size 982 * size
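
Taken together, the hunks above implement a single-outstanding-operation gate: posting an NVM erase/update marks the admin queue busy, later NVM commands are rejected with an error, and the completion event for the in-flight operation reopens the gate. A hedged sketch of that state machine — opcode names and error handling below are illustrative, not the i40e API:

#include <stdbool.h>
#include <stdio.h>

enum opcode { OPC_NVM_READ, OPC_NVM_ERASE, OPC_NVM_UPDATE, OPC_OTHER };

static bool nvm_busy;

static bool is_nvm_update_op(enum opcode opc)
{
	return opc == OPC_NVM_ERASE || opc == OPC_NVM_UPDATE;
}

/* Posting a command: reject NVM writes while one is in flight. */
static int send_command(enum opcode opc)
{
	if (is_nvm_update_op(opc) && nvm_busy) {
		fprintf(stderr, "NVM busy, command rejected\n");
		return -1;
	}
	if (is_nvm_update_op(opc))
		nvm_busy = true;
	return 0;
}

/* Completion handling: the event for the NVM op reopens the gate. */
static void clean_completion(enum opcode opc)
{
	if (is_nvm_update_op(opc))
		nvm_busy = false;
}

int main(void)
{
	send_command(OPC_NVM_UPDATE);        /* accepted, gate closes */
	send_command(OPC_NVM_ERASE);         /* rejected: busy */
	clean_completion(OPC_NVM_UPDATE);    /* completion reopens gate */
	return send_command(OPC_NVM_ERASE);  /* accepted again */
}
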
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f7685a911..b1552fbc48a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
90 u16 fw_min_ver; /* firmware minor version */ 90 u16 fw_min_ver; /* firmware minor version */
91 u16 api_maj_ver; /* api major version */ 91 u16 api_maj_ver; /* api major version */
92 u16 api_min_ver; /* api minor version */ 92 u16 api_min_ver; /* api minor version */
93 bool nvm_busy;
93 94
94 struct mutex asq_mutex; /* Send queue lock */ 95 struct mutex asq_mutex; /* Send queue lock */
95 struct mutex arq_mutex; /* Receive queue lock */ 96 struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a8f8da..15f289f2917f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
34 */ 34 */
35 35
36#define I40E_FW_API_VERSION_MAJOR 0x0001 36#define I40E_FW_API_VERSION_MAJOR 0x0001
37#define I40E_FW_API_VERSION_MINOR 0x0001 37#define I40E_FW_API_VERSION_MINOR 0x0002
38 38
39struct i40e_aq_desc { 39struct i40e_aq_desc {
40 __le16 flags; 40 __le16 flags;
@@ -123,6 +123,7 @@ enum i40e_admin_queue_opc {
123 i40e_aqc_opc_get_version = 0x0001, 123 i40e_aqc_opc_get_version = 0x0001,
124 i40e_aqc_opc_driver_version = 0x0002, 124 i40e_aqc_opc_driver_version = 0x0002,
125 i40e_aqc_opc_queue_shutdown = 0x0003, 125 i40e_aqc_opc_queue_shutdown = 0x0003,
126 i40e_aqc_opc_set_pf_context = 0x0004,
126 127
127 /* resource ownership */ 128 /* resource ownership */
128 i40e_aqc_opc_request_resource = 0x0008, 129 i40e_aqc_opc_request_resource = 0x0008,
@@ -182,9 +183,6 @@ enum i40e_admin_queue_opc {
182 i40e_aqc_opc_add_mirror_rule = 0x0260, 183 i40e_aqc_opc_add_mirror_rule = 0x0260,
183 i40e_aqc_opc_delete_mirror_rule = 0x0261, 184 i40e_aqc_opc_delete_mirror_rule = 0x0261,
184 185
185 i40e_aqc_opc_set_storm_control_config = 0x0280,
186 i40e_aqc_opc_get_storm_control_config = 0x0281,
187
188 /* DCB commands */ 186 /* DCB commands */
189 i40e_aqc_opc_dcb_ignore_pfc = 0x0301, 187 i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
190 i40e_aqc_opc_dcb_updated = 0x0302, 188 i40e_aqc_opc_dcb_updated = 0x0302,
@@ -207,6 +205,7 @@ enum i40e_admin_queue_opc {
207 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, 205 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
208 i40e_aqc_opc_suspend_port_tx = 0x041B, 206 i40e_aqc_opc_suspend_port_tx = 0x041B,
209 i40e_aqc_opc_resume_port_tx = 0x041C, 207 i40e_aqc_opc_resume_port_tx = 0x041C,
208 i40e_aqc_opc_configure_partition_bw = 0x041D,
210 209
211 /* hmc */ 210 /* hmc */
212 i40e_aqc_opc_query_hmc_resource_profile = 0x0500, 211 i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -224,13 +223,15 @@ enum i40e_admin_queue_opc {
224 i40e_aqc_opc_get_partner_advt = 0x0616, 223 i40e_aqc_opc_get_partner_advt = 0x0616,
225 i40e_aqc_opc_set_lb_modes = 0x0618, 224 i40e_aqc_opc_set_lb_modes = 0x0618,
226 i40e_aqc_opc_get_phy_wol_caps = 0x0621, 225 i40e_aqc_opc_get_phy_wol_caps = 0x0621,
227 i40e_aqc_opc_set_phy_reset = 0x0622, 226 i40e_aqc_opc_set_phy_debug = 0x0622,
228 i40e_aqc_opc_upload_ext_phy_fm = 0x0625, 227 i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
229 228
230 /* NVM commands */ 229 /* NVM commands */
231 i40e_aqc_opc_nvm_read = 0x0701, 230 i40e_aqc_opc_nvm_read = 0x0701,
232 i40e_aqc_opc_nvm_erase = 0x0702, 231 i40e_aqc_opc_nvm_erase = 0x0702,
233 i40e_aqc_opc_nvm_update = 0x0703, 232 i40e_aqc_opc_nvm_update = 0x0703,
233 i40e_aqc_opc_nvm_config_read = 0x0704,
234 i40e_aqc_opc_nvm_config_write = 0x0705,
234 235
235 /* virtualization commands */ 236 /* virtualization commands */
236 i40e_aqc_opc_send_msg_to_pf = 0x0801, 237 i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -272,8 +273,6 @@ enum i40e_admin_queue_opc {
272 i40e_aqc_opc_debug_set_mode = 0xFF01, 273 i40e_aqc_opc_debug_set_mode = 0xFF01,
273 i40e_aqc_opc_debug_read_reg = 0xFF03, 274 i40e_aqc_opc_debug_read_reg = 0xFF03,
274 i40e_aqc_opc_debug_write_reg = 0xFF04, 275 i40e_aqc_opc_debug_write_reg = 0xFF04,
275 i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
276 i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
277 i40e_aqc_opc_debug_modify_reg = 0xFF07, 276 i40e_aqc_opc_debug_modify_reg = 0xFF07,
278 i40e_aqc_opc_debug_dump_internals = 0xFF08, 277 i40e_aqc_opc_debug_dump_internals = 0xFF08,
279 i40e_aqc_opc_debug_modify_internals = 0xFF09, 278 i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -341,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
341 340
342I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); 341I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
343 342
343/* Set PF context (0x0004, direct) */
344struct i40e_aqc_set_pf_context {
345 u8 pf_id;
346 u8 reserved[15];
347};
348
349I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
350
344/* Request resource ownership (direct 0x0008) 351/* Request resource ownership (direct 0x0008)
345 * Release resource ownership (direct 0x0009) 352 * Release resource ownership (direct 0x0009)
346 */ 353 */
@@ -1289,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
1289 1296
1290I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); 1297I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
1291 1298
1292/* Set Storm Control Configuration (direct 0x0280)
1293 * Get Storm Control Configuration (direct 0x0281)
1294 * the command and response use the same descriptor structure
1295 */
1296struct i40e_aqc_set_get_storm_control_config {
1297 __le32 broadcast_threshold;
1298 __le32 multicast_threshold;
1299 __le32 control_flags;
1300#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
1301#define I40E_AQC_STORM_CONTROL_MDICW 0x02
1302#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
1303#define I40E_AQC_STORM_CONTROL_BDICW 0x08
1304#define I40E_AQC_STORM_CONTROL_BIDU 0x10
1305#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
1306#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
1307 I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
1308 u8 reserved[4];
1309};
1310
1311I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
1312
1313/* DCB 0x03xx*/ 1299/* DCB 0x03xx*/
1314 1300
1315/* PFC Ignore (direct 0x0301) 1301/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
1427struct i40e_aqc_configure_switching_comp_ets_data { 1413struct i40e_aqc_configure_switching_comp_ets_data {
1428 u8 reserved[4]; 1414 u8 reserved[4];
1429 u8 tc_valid_bits; 1415 u8 tc_valid_bits;
1430 u8 reserved1; 1416 u8 seepage;
1417#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
1431 u8 tc_strict_priority_flags; 1418 u8 tc_strict_priority_flags;
1432 u8 reserved2[17]; 1419 u8 reserved1[17];
1433 u8 tc_bw_share_credits[8]; 1420 u8 tc_bw_share_credits[8];
1434 u8 reserved3[96]; 1421 u8 reserved2[96];
1435}; 1422};
1436 1423
1437/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1424/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1499 * (direct 0x041B and 0x041C) uses the generic SEID struct 1486 * (direct 0x041B and 0x041C) uses the generic SEID struct
1500 */ 1487 */
1501 1488
1489/* Configure partition BW
1490 * (indirect 0x041D)
1491 */
1492struct i40e_aqc_configure_partition_bw_data {
1493 __le16 pf_valid_bits;
1494 u8 min_bw[16]; /* guaranteed bandwidth */
1495 u8 max_bw[16]; /* bandwidth limit */
1496};
1497
1502/* Get and set the active HMC resource profile and status. 1498/* Get and set the active HMC resource profile and status.
1503 * (direct 0x0500) and (direct 0x0501) 1499 * (direct 0x0500) and (direct 0x0501)
1504 */ 1500 */
@@ -1539,6 +1535,8 @@ enum i40e_aq_phy_type {
1539 I40E_PHY_TYPE_XLPPI = 0x9, 1535 I40E_PHY_TYPE_XLPPI = 0x9,
1540 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, 1536 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
1541 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, 1537 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
1538 I40E_PHY_TYPE_10GBASE_AOC = 0xC,
1539 I40E_PHY_TYPE_40GBASE_AOC = 0xD,
1542 I40E_PHY_TYPE_100BASE_TX = 0x11, 1540 I40E_PHY_TYPE_100BASE_TX = 0x11,
1543 I40E_PHY_TYPE_1000BASE_T = 0x12, 1541 I40E_PHY_TYPE_1000BASE_T = 0x12,
1544 I40E_PHY_TYPE_10GBASE_T = 0x13, 1542 I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1547,10 @@ enum i40e_aq_phy_type {
1549 I40E_PHY_TYPE_40GBASE_CR4 = 0x18, 1547 I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
1550 I40E_PHY_TYPE_40GBASE_SR4 = 0x19, 1548 I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
1551 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, 1549 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
1552 I40E_PHY_TYPE_20GBASE_KR2 = 0x1B, 1550 I40E_PHY_TYPE_1000BASE_SX = 0x1B,
1551 I40E_PHY_TYPE_1000BASE_LX = 0x1C,
1552 I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
1553 I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
1553 I40E_PHY_TYPE_MAX 1554 I40E_PHY_TYPE_MAX
1554}; 1555};
1555 1556
@@ -1583,11 +1584,8 @@ struct i40e_aq_get_phy_abilities_resp {
1583#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 1584#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
1584#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 1585#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
1585#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 1586#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
1586#define I40E_AQ_PHY_FLAG_AN_SHIFT 3 1587#define I40E_AQ_PHY_LINK_ENABLED 0x08
1587#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT) 1588#define I40E_AQ_PHY_AN_ENABLED 0x10
1588#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
1589#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
1590#define I40E_AQ_PHY_FLAG_AN_ON 0x02
1591#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 1589#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
1592 __le16 eee_capability; 1590 __le16 eee_capability;
1593#define I40E_AQ_EEE_100BASE_TX 0x0002 1591#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1694,7 @@ struct i40e_aqc_get_link_status {
1696#define I40E_AQ_LINK_TX_ACTIVE 0x00 1694#define I40E_AQ_LINK_TX_ACTIVE 0x00
1697#define I40E_AQ_LINK_TX_DRAINED 0x01 1695#define I40E_AQ_LINK_TX_DRAINED 0x01
1698#define I40E_AQ_LINK_TX_FLUSHED 0x03 1696#define I40E_AQ_LINK_TX_FLUSHED 0x03
1697#define I40E_AQ_LINK_FORCED_40G 0x10
1699 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ 1698 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
1700 __le16 max_frame_size; 1699 __le16 max_frame_size;
1701 u8 config; 1700 u8 config;
@@ -1747,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
1747 1746
1748I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); 1747I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
1749 1748
1750/* Set PHY Reset command (0x0622) */ 1749/* Set PHY Debug command (0x0622) */
1751struct i40e_aqc_set_phy_reset { 1750struct i40e_aqc_set_phy_debug {
1752 u8 reset_flags; 1751 u8 command_flags;
1753#define I40E_AQ_PHY_RESET_REQUEST 0x02 1752#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
1753#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
1754#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
1755 I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
1756#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
1757#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
1758#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
1759#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
1754 u8 reserved[15]; 1760 u8 reserved[15];
1755}; 1761};
1756 1762
1757I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset); 1763I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
1758 1764
1759enum i40e_aq_phy_reg_type { 1765enum i40e_aq_phy_reg_type {
1760 I40E_AQC_PHY_REG_INTERNAL = 0x1, 1766 I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1785,47 @@ struct i40e_aqc_nvm_update {
1779 1785
1780I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); 1786I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1781 1787
1788/* NVM Config Read (indirect 0x0704) */
1789struct i40e_aqc_nvm_config_read {
1790 __le16 cmd_flags;
1791#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1792#define ANVM_READ_SINGLE_FEATURE 0
1793#define ANVM_READ_MULTIPLE_FEATURES 1
1794 __le16 element_count;
1795 __le16 element_id; /* Feature/field ID */
1796 u8 reserved[2];
1797 __le32 address_high;
1798 __le32 address_low;
1799};
1800
1801I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
1802
1803/* NVM Config Write (indirect 0x0705) */
1804struct i40e_aqc_nvm_config_write {
1805 __le16 cmd_flags;
1806 __le16 element_count;
1807 u8 reserved[4];
1808 __le32 address_high;
1809 __le32 address_low;
1810};
1811
1812I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1813
1814struct i40e_aqc_nvm_config_data_feature {
1815 __le16 feature_id;
1816 __le16 instance_id;
1817 __le16 feature_options;
1818 __le16 feature_selection;
1819};
1820
1821struct i40e_aqc_nvm_config_data_immediate_field {
1822#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
1823 __le16 field_id;
1824 __le16 instance_id;
1825 __le16 field_options;
1826 __le16 field_value;
1827};
1828
1782/* Send to PF command (indirect 0x0801) id is only used by PF 1829/* Send to PF command (indirect 0x0801) id is only used by PF
1783 * Send to VF command (indirect 0x0802) id is only used by PF 1830 * Send to VF command (indirect 0x0802) id is only used by PF
1784 * Send to Peer PF command (indirect 0x0803) 1831 * Send to Peer PF command (indirect 0x0803)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc45c54..6e65f19dd6e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -43,12 +43,10 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
43 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { 43 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
44 switch (hw->device_id) { 44 switch (hw->device_id) {
45 case I40E_DEV_ID_SFP_XL710: 45 case I40E_DEV_ID_SFP_XL710:
46 case I40E_DEV_ID_SFP_X710:
47 case I40E_DEV_ID_QEMU: 46 case I40E_DEV_ID_QEMU:
48 case I40E_DEV_ID_KX_A: 47 case I40E_DEV_ID_KX_A:
49 case I40E_DEV_ID_KX_B: 48 case I40E_DEV_ID_KX_B:
50 case I40E_DEV_ID_KX_C: 49 case I40E_DEV_ID_KX_C:
51 case I40E_DEV_ID_KX_D:
52 case I40E_DEV_ID_QSFP_A: 50 case I40E_DEV_ID_QSFP_A:
53 case I40E_DEV_ID_QSFP_B: 51 case I40E_DEV_ID_QSFP_B:
54 case I40E_DEV_ID_QSFP_C: 52 case I40E_DEV_ID_QSFP_C:
@@ -133,7 +131,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
133 **/ 131 **/
134bool i40e_check_asq_alive(struct i40e_hw *hw) 132bool i40e_check_asq_alive(struct i40e_hw *hw)
135{ 133{
136 return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); 134 if (hw->aq.asq.len)
135 return !!(rd32(hw, hw->aq.asq.len) &
136 I40E_PF_ATQLEN_ATQENABLE_MASK);
137 else
138 return false;
137} 139}
138 140
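
i40e_check_asq_alive now refuses to dereference the queue-length register until its offset has been initialized — reading through a zero offset during early init or after shutdown would hit the wrong register. A sketch of the guard, with a faked register map and an illustrative enable bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ATQLEN_ENABLE_MASK (1u << 31)  /* illustrative enable bit */

static uint32_t regmap[64];

struct queue { uint32_t len; };        /* len = register offset, 0 = unset */

static bool check_asq_alive(const struct queue *asq)
{
	if (asq->len)                     /* offset programmed? */
		return regmap[asq->len] & ATQLEN_ENABLE_MASK;
	return false;                     /* not initialized: not alive */
}

int main(void)
{
	struct queue asq = { 0 };

	printf("alive before init: %d\n", check_asq_alive(&asq));
	asq.len = 7;
	regmap[asq.len] = ATQLEN_ENABLE_MASK;
	printf("alive after init:  %d\n", check_asq_alive(&asq));
	return 0;
}
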
139/** 141/**
@@ -653,6 +655,36 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
653} 655}
654 656
655/** 657/**
 658 * i40e_pre_tx_queue_cfg - pre-configure a Tx queue
 659 * @hw: pointer to the HW structure
 660 * @queue: target PF queue index
 661 * @enable: state change request
 662 *
 663 * Handles the hw requirement to signal the intention to enable
 664 * or disable the target queue before its state is changed.
665 **/
666void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
667{
668 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
669 u32 reg_block = 0;
670 u32 reg_val;
671
672 if (abs_queue_idx >= 128)
673 reg_block = abs_queue_idx / 128;
674
675 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
676 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
677 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
678
679 if (enable)
680 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
681 else
682 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
683
684 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
685}
686
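
i40e_pre_tx_queue_cfg folds the absolute queue index into a banked register: queues are grouped 128 per GLLAN_TXPRE_QDIS instance, and the index is written into a field of the selected register. A small demo of the block/field split; the field mask here is illustrative, not the hardware definition:

#include <stdio.h>

#define QUEUES_PER_BLOCK 128      /* 128 queues per GLLAN_TXPRE_QDIS register */
#define QINDX_MASK 0x7ffu         /* illustrative width of the QINDX field */

int main(void)
{
	unsigned int queues[] = { 5, 127, 128, 300 };
	size_t i;

	for (i = 0; i < sizeof(queues) / sizeof(queues[0]); i++) {
		unsigned int q = queues[i];
		unsigned int block = q / QUEUES_PER_BLOCK;  /* register instance */
		unsigned int qindx = q & QINDX_MASK;        /* value for the field */

		printf("queue %3u -> GLLAN_TXPRE_QDIS[%u], QINDX 0x%x\n",
		       q, block, qindx);
	}
	return 0;
}
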
687/**
656 * i40e_get_media_type - Gets media type 688 * i40e_get_media_type - Gets media type
657 * @hw: pointer to the hardware structure 689 * @hw: pointer to the hardware structure
658 **/ 690 **/
@@ -699,7 +731,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
699} 731}
700 732
701#define I40E_PF_RESET_WAIT_COUNT_A0 200 733#define I40E_PF_RESET_WAIT_COUNT_A0 200
702#define I40E_PF_RESET_WAIT_COUNT 10 734#define I40E_PF_RESET_WAIT_COUNT 100
703/** 735/**
704 * i40e_pf_reset - Reset the PF 736 * i40e_pf_reset - Reset the PF
705 * @hw: pointer to the hardware structure 737 * @hw: pointer to the hardware structure
@@ -789,6 +821,9 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
789{ 821{
790 u32 reg; 822 u32 reg;
791 823
824 if (i40e_check_asq_alive(hw))
825 i40e_aq_clear_pxe_mode(hw, NULL);
826
792 /* Clear single descriptor fetch/write-back mode */ 827 /* Clear single descriptor fetch/write-back mode */
793 reg = rd32(hw, I40E_GLLAN_RCTL_0); 828 reg = rd32(hw, I40E_GLLAN_RCTL_0);
794 829
@@ -907,6 +942,33 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
907/* Admin command wrappers */ 942/* Admin command wrappers */
908 943
909/** 944/**
945 * i40e_aq_clear_pxe_mode
946 * @hw: pointer to the hw struct
947 * @cmd_details: pointer to command details structure or NULL
948 *
949 * Tell the firmware that the driver is taking over from PXE
950 **/
951i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
952 struct i40e_asq_cmd_details *cmd_details)
953{
954 i40e_status status;
955 struct i40e_aq_desc desc;
956 struct i40e_aqc_clear_pxe *cmd =
957 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
958
959 i40e_fill_default_direct_cmd_desc(&desc,
960 i40e_aqc_opc_clear_pxe_mode);
961
962 cmd->rx_cnt = 0x2;
963
964 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
965
966 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
967
968 return status;
969}
970
971/**
910 * i40e_aq_set_link_restart_an 972 * i40e_aq_set_link_restart_an
911 * @hw: pointer to the hw struct 973 * @hw: pointer to the hw struct
912 * @cmd_details: pointer to command details structure or NULL 974 * @cmd_details: pointer to command details structure or NULL
@@ -975,6 +1037,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
975 hw_link_info->an_info = resp->an_info; 1037 hw_link_info->an_info = resp->an_info;
976 hw_link_info->ext_info = resp->ext_info; 1038 hw_link_info->ext_info = resp->ext_info;
977 hw_link_info->loopback = resp->loopback; 1039 hw_link_info->loopback = resp->loopback;
1040 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1041 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1042
1043 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1044 hw_link_info->crc_enable = true;
1045 else
1046 hw_link_info->crc_enable = false;
978 1047
979 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE)) 1048 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
980 hw_link_info->lse_enable = true; 1049 hw_link_info->lse_enable = true;
@@ -1021,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1021 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1090 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1022 1091
1023 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1092 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1024 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
1025 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1026 1093
1027 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1094 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1028 sizeof(vsi_ctx->info), cmd_details); 1095 sizeof(vsi_ctx->info), cmd_details);
@@ -1163,8 +1230,6 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
1163 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 1230 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
1164 1231
1165 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 1232 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1166 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
1167 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1168 1233
1169 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1234 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1170 sizeof(vsi_ctx->info), NULL); 1235 sizeof(vsi_ctx->info), NULL);
@@ -1203,8 +1268,6 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
1203 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 1268 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
1204 1269
1205 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1270 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1206 if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
1207 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1208 1271
1209 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1272 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1210 sizeof(vsi_ctx->info), cmd_details); 1273 sizeof(vsi_ctx->info), cmd_details);
@@ -1300,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
1300 struct i40e_aqc_driver_version *cmd = 1363 struct i40e_aqc_driver_version *cmd =
1301 (struct i40e_aqc_driver_version *)&desc.params.raw; 1364 (struct i40e_aqc_driver_version *)&desc.params.raw;
1302 i40e_status status; 1365 i40e_status status;
1366 u16 len;
1303 1367
1304 if (dv == NULL) 1368 if (dv == NULL)
1305 return I40E_ERR_PARAM; 1369 return I40E_ERR_PARAM;
@@ -1311,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
1311 cmd->driver_minor_ver = dv->minor_version; 1375 cmd->driver_minor_ver = dv->minor_version;
1312 cmd->driver_build_ver = dv->build_version; 1376 cmd->driver_build_ver = dv->build_version;
1313 cmd->driver_subbuild_ver = dv->subbuild_version; 1377 cmd->driver_subbuild_ver = dv->subbuild_version;
1314 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1378
1379 len = 0;
1380 while (len < sizeof(dv->driver_string) &&
1381 (dv->driver_string[len] < 0x80) &&
1382 dv->driver_string[len])
1383 len++;
1384 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
1385 len, cmd_details);
1315 1386
1316 return status; 1387 return status;
1317} 1388}
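
The driver-version change sends the driver string as the command buffer, sizing it with an open-coded scan that stops at the buffer end, at a NUL, or at the first non-ASCII byte. A standalone sketch of that bounded scan:

#include <stdio.h>

/* Length of s, capped at max, stopping at NUL or any byte >= 0x80. */
static size_t ascii_strnlen(const char *s, size_t max)
{
	size_t len = 0;

	while (len < max && (unsigned char)s[len] < 0x80 && s[len])
		len++;
	return len;
}

int main(void)
{
	char buf[32] = "i40e-1.0.0" "\xc3\xa9" "tail";  /* non-ASCII mid-string */

	printf("send %zu bytes\n", ascii_strnlen(buf, sizeof(buf))); /* 10 */
	return 0;
}

Clamping to 7-bit characters keeps the firmware from ever seeing a partially transmitted multi-byte sequence.
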
@@ -1900,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
1900 } 1971 }
1901 } 1972 }
1902 1973
 1974 /* Software override ensuring FCoE is disabled in npar or mfp
 1975 * mode, because it is not supported in those modes.
 1976 */
1977 if (p->npar_enable || p->mfp_mode_1)
1978 p->fcoe = false;
1979
1903 /* additional HW specific goodies that might 1980 /* additional HW specific goodies that might
1904 * someday be HW version specific 1981 * someday be HW version specific
1905 */ 1982 */
@@ -2094,8 +2171,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
2094 * @cmd_details: pointer to command details structure or NULL 2171 * @cmd_details: pointer to command details structure or NULL
2095 **/ 2172 **/
2096i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 2173i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
2097 u16 udp_port, u8 header_len, 2174 u16 udp_port, u8 protocol_index,
2098 u8 protocol_index, u8 *filter_index, 2175 u8 *filter_index,
2099 struct i40e_asq_cmd_details *cmd_details) 2176 struct i40e_asq_cmd_details *cmd_details)
2100{ 2177{
2101 struct i40e_aq_desc desc; 2178 struct i40e_aq_desc desc;
@@ -2253,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
2253} 2330}
2254 2331
2255/** 2332/**
2333 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
2334 * @hw: pointer to the hw struct
2335 * @seid: VSI seid
2336 * @credit: BW limit credits (0 = disabled)
2337 * @max_credit: Max BW limit credits
2338 * @cmd_details: pointer to command details structure or NULL
2339 **/
2340i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
2341 u16 seid, u16 credit, u8 max_credit,
2342 struct i40e_asq_cmd_details *cmd_details)
2343{
2344 struct i40e_aq_desc desc;
2345 struct i40e_aqc_configure_vsi_bw_limit *cmd =
2346 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
2347 i40e_status status;
2348
2349 i40e_fill_default_direct_cmd_desc(&desc,
2350 i40e_aqc_opc_configure_vsi_bw_limit);
2351
2352 cmd->vsi_seid = cpu_to_le16(seid);
2353 cmd->credit = cpu_to_le16(credit);
2354 cmd->max_credit = max_credit;
2355
2356 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2357
2358 return status;
2359}
2360
2361/**
2256 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC 2362 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
2257 * @hw: pointer to the hw struct 2363 * @hw: pointer to the hw struct
2258 * @seid: VSI seid 2364 * @seid: VSI seid
@@ -2405,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
2405{ 2511{
2406 u32 fcoe_cntx_size, fcoe_filt_size; 2512 u32 fcoe_cntx_size, fcoe_filt_size;
2407 u32 pe_cntx_size, pe_filt_size; 2513 u32 pe_cntx_size, pe_filt_size;
2408 u32 fcoe_fmax, pe_fmax; 2514 u32 fcoe_fmax;
2409 u32 val; 2515 u32 val;
2410 2516
2411 /* Validate FCoE settings passed */ 2517 /* Validate FCoE settings passed */
@@ -2480,13 +2586,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
2480 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 2586 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
2481 return I40E_ERR_INVALID_SIZE; 2587 return I40E_ERR_INVALID_SIZE;
2482 2588
2483 /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
2484 val = rd32(hw, I40E_GLHMC_PEXFMAX);
2485 pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
2486 >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
2487 if (pe_filt_size + pe_cntx_size > pe_fmax)
2488 return I40E_ERR_INVALID_SIZE;
2489
2490 return 0; 2589 return 0;
2491} 2590}
2492 2591
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 6e8103abfd0d..00bc0cdb3a03 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
232 struct i40e_ieee_app_priority_table *app) 232 struct i40e_ieee_app_priority_table *app)
233{ 233{
234 int v, err; 234 int v, err;
235 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 235 for (v = 0; v < pf->num_alloc_vsi; v++) {
236 if (pf->vsi[v] && pf->vsi[v]->netdev) { 236 if (pf->vsi[v] && pf->vsi[v]->netdev) {
237 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); 237 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
238 if (err) 238 if (err)
@@ -302,8 +302,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
302 struct net_device *dev = vsi->netdev; 302 struct net_device *dev = vsi->netdev;
303 struct i40e_pf *pf = i40e_netdev_to_pf(dev); 303 struct i40e_pf *pf = i40e_netdev_to_pf(dev);
304 304
305 /* DCB not enabled */ 305 /* Not DCB capable */
306 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 306 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
307 return; 307 return;
308 308
309 /* Do not setup DCB NL ops for MFP mode */ 309 /* Do not setup DCB NL ops for MFP mode */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386fd138..cffdfc21290f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
 	if (seid < 0)
 		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
 	else
-		for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+		for (i = 0; i < pf->num_alloc_vsi; i++)
 			if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
 				return pf->vsi[i];
 
@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
 {
 	int i;
 
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i])
 			dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
 				 i, pf->vsi[i]->seid);
@@ -862,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
862 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n", 862 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
863 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast); 863 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
864 dev_info(&pf->pdev->dev, 864 dev_info(&pf->pdev->dev,
865 " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n", 865 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
866 estats->rx_broadcast, estats->rx_discards, estats->rx_errors); 866 estats->rx_broadcast, estats->rx_discards);
867 dev_info(&pf->pdev->dev, 867 dev_info(&pf->pdev->dev,
868 " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", 868 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
869 estats->rx_missed, estats->rx_unknown_protocol, 869 estats->rx_unknown_protocol, estats->tx_bytes);
870 estats->tx_bytes);
871 dev_info(&pf->pdev->dev, 870 dev_info(&pf->pdev->dev,
872 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n", 871 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
873 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); 872 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
@@ -1527,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
 		if (cnt == 0) {
 			int i;
-			for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+			for (i = 0; i < pf->num_alloc_vsi; i++)
 				i40e_vsi_reset_stats(pf->vsi[i]);
 			dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
 		} else if (cnt == 1) {
@@ -1744,10 +1743,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
 	} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
 		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
-	} else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
-		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
-	} else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
-		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
 	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
 		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
 			int ret;
@@ -1967,8 +1962,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
 		dev_info(&pf->pdev->dev, " fd-atr off\n");
 		dev_info(&pf->pdev->dev, " fd-atr on\n");
-		dev_info(&pf->pdev->dev, " fd-sb off\n");
-		dev_info(&pf->pdev->dev, " fd-sb on\n");
 		dev_info(&pf->pdev->dev, " lldp start\n");
 		dev_info(&pf->pdev->dev, " lldp stop\n");
 		dev_info(&pf->pdev->dev, " lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index b2380daef8c1..56438bd579e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -67,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 
 struct i40e_diag_reg_test_info i40e_reg_list[] = {
 	/* offset               mask         elements   stride */
-	{I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
-	{I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
-	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
-	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
-	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
-	{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
-	{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
-	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
-	{I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
-	{I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
-	{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+	{I40E_QTX_CTL(0), 0x0000FFBF, 1,
+		I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+	{I40E_PFINT_ITR0(0), 0x00000FFF, 3,
+		I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
+		I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
+		I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
+		I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+	{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+	{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
+		I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+	{I40E_QINT_TQCTL(0), 0x000000FF, 1,
+		I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+	{I40E_QINT_RQCTL(0), 0x000000FF, 1,
+		I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+	{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
 	{ 0 }
 };
 
@@ -93,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
 	u32 reg, mask;
 	u32 i, j;
 
-	for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+	for (i = 0; i40e_reg_list[i].offset != 0 &&
+	     !ret_code; i++) {
+
+		/* set actual reg range for dynamically allocated resources */
+		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+		    hw->func_caps.num_tx_qp != 0)
+			i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+		if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+		     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+		     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+		    hw->func_caps.num_msix_vectors != 0)
+			i40e_reg_list[i].elements =
+				hw->func_caps.num_msix_vectors - 1;
+
+		/* test register access */
 		mask = i40e_reg_list[i].mask;
-		for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+		for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
 			reg = i40e_reg_list[i].offset +
 			      (j * i40e_reg_list[i].stride);
 			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
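The hunk above makes i40e_reg_list self-adjusting: element counts for queue and interrupt registers are taken from the function capabilities at run time instead of being hard-coded. Below is a minimal, self-contained sketch of the same table-driven pattern-test idea; the offsets, the mock MMIO array, and the pattern values are invented for illustration and are not the driver's.

/* Toy table-driven register pattern test (hypothetical offsets/registers) */
#include <stdint.h>
#include <stdio.h>

struct reg_test { uint32_t offset, mask, elements, stride; };

static uint32_t mock_regs[0x1000];	/* stands in for real MMIO space */
static uint32_t rd(uint32_t reg) { return mock_regs[reg % 0x1000]; }
static void wr(uint32_t reg, uint32_t val) { mock_regs[reg % 0x1000] = val; }

/* Write each pattern through the writable-bit mask and verify readback. */
static int pattern_test(uint32_t reg, uint32_t mask)
{
	static const uint32_t patterns[] =
		{ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
	for (unsigned int p = 0; p < 4; p++) {
		wr(reg, patterns[p] & mask);
		if (rd(reg) != (patterns[p] & mask))
			return -1;
	}
	return 0;
}

int main(void)
{
	/* shaped like i40e_reg_list: offset, mask, elements, stride */
	static struct reg_test list[] = {
		{ 0x100, 0x0000FFBF, 4, 0x4 },
		{ 0x200, 0x00000FFF, 3, 0x4 },
		{ 0 }
	};
	for (unsigned int i = 0; list[i].offset; i++)
		for (unsigned int j = 0; j < list[i].elements; j++)
			if (pattern_test(list[i].offset + j * list[i].stride,
					 list[i].mask))
				return 1;
	puts("all register patterns ok");
	return 0;
}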
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cbc5c25..4a488ffcd6b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -46,6 +46,8 @@ struct i40e_stats {
 		I40E_STAT(struct i40e_pf, _name, _stat)
 #define I40E_VSI_STAT(_name, _stat) \
 		I40E_STAT(struct i40e_vsi, _name, _stat)
+#define I40E_VEB_STAT(_name, _stat) \
+		I40E_STAT(struct i40e_veb, _name, _stat)
 
 static const struct i40e_stats i40e_gstrings_net_stats[] = {
 	I40E_NETDEV_STAT(rx_packets),
@@ -56,12 +58,36 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
 	I40E_NETDEV_STAT(tx_errors),
 	I40E_NETDEV_STAT(rx_dropped),
 	I40E_NETDEV_STAT(tx_dropped),
-	I40E_NETDEV_STAT(multicast),
 	I40E_NETDEV_STAT(collisions),
 	I40E_NETDEV_STAT(rx_length_errors),
 	I40E_NETDEV_STAT(rx_crc_errors),
 };
 
+static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
+	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
+	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
+	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
+	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
+	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
+	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
+	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
+	I40E_VEB_STAT("rx_discards", stats.rx_discards),
+	I40E_VEB_STAT("tx_discards", stats.tx_discards),
+	I40E_VEB_STAT("tx_errors", stats.tx_errors),
+	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+};
+
 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 				 struct ethtool_rxnfc *cmd);
 
@@ -78,7 +104,12 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 static struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
 	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
-	I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
+	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
+	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
+	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
+	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
+	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
 	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
 	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
 	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
@@ -88,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
88 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), 119 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
89 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 120 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
90 I40E_PF_STAT("tx_timeout", tx_timeout_count), 121 I40E_PF_STAT("tx_timeout", tx_timeout_count),
122 I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
91 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), 123 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
92 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), 124 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
93 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 125 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -112,8 +144,10 @@ static struct i40e_stats i40e_gstrings_stats[] = {
112 I40E_PF_STAT("rx_oversize", stats.rx_oversize), 144 I40E_PF_STAT("rx_oversize", stats.rx_oversize),
113 I40E_PF_STAT("rx_jabber", stats.rx_jabber), 145 I40E_PF_STAT("rx_jabber", stats.rx_jabber),
114 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), 146 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
115 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
116 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), 147 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
148 I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
149 I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
150
117 /* LPI stats */ 151 /* LPI stats */
118 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), 152 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
119 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), 153 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
@@ -122,11 +156,14 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 };
 
 #define I40E_QUEUE_STATS_LEN(n) \
-	((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
-	((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+	  * 2 /* Tx and Rx together */ \
+	  * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
 #define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN	ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
 #define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
+				 I40E_MISC_STATS_LEN + \
 				 I40E_QUEUE_STATS_LEN((n)))
 #define I40E_PFC_STATS_LEN ( \
 		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
@@ -135,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
 		 / sizeof(u64))
+#define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
 #define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
 				 I40E_PFC_STATS_LEN + \
 				 I40E_VSI_STATS_LEN((n)))
@@ -620,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
 	case ETH_SS_TEST:
 		return I40E_TEST_LEN;
 	case ETH_SS_STATS:
-		if (vsi == pf->vsi[pf->lan_vsi])
-			return I40E_PF_STATS_LEN(netdev);
-		else
+		if (vsi == pf->vsi[pf->lan_vsi]) {
+			int len = I40E_PF_STATS_LEN(netdev);
+
+			if (pf->lan_veb != I40E_NO_VEB)
+				len += I40E_VEB_STATS_LEN;
+			return len;
+		} else {
 			return I40E_VSI_STATS_LEN(netdev);
+		}
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -633,6 +676,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 				   struct ethtool_stats *stats, u64 *data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_ring *tx_ring, *rx_ring;
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
 	int i = 0;
@@ -648,10 +692,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
+	for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
+		p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
+		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
 	rcu_read_lock();
-	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
-		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
-		struct i40e_ring *rx_ring;
+	for (j = 0; j < vsi->num_queue_pairs; j++) {
+		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
 
 		if (!tx_ring)
 			continue;
@@ -662,33 +710,45 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 			data[i] = tx_ring->stats.packets;
 			data[i + 1] = tx_ring->stats.bytes;
 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+		i += 2;
 
 		/* Rx ring is the 2nd half of the queue pair */
 		rx_ring = &tx_ring[1];
 		do {
 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
-			data[i + 2] = rx_ring->stats.packets;
-			data[i + 3] = rx_ring->stats.bytes;
+			data[i] = rx_ring->stats.packets;
+			data[i + 1] = rx_ring->stats.bytes;
 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+		i += 2;
 	}
 	rcu_read_unlock();
-	if (vsi == pf->vsi[pf->lan_vsi]) {
-		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
-			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
-			data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
-				   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-		}
-		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
-			data[i++] = pf->stats.priority_xon_tx[j];
-			data[i++] = pf->stats.priority_xoff_tx[j];
-		}
-		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
-			data[i++] = pf->stats.priority_xon_rx[j];
-			data[i++] = pf->stats.priority_xoff_rx[j];
-		}
-		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
-			data[i++] = pf->stats.priority_xon_2_xoff[j];
-	}
+	if (vsi != pf->vsi[pf->lan_vsi])
+		return;
+
+	if (pf->lan_veb != I40E_NO_VEB) {
+		struct i40e_veb *veb = pf->veb[pf->lan_veb];
+		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+			p = (char *)veb;
+			p += i40e_gstrings_veb_stats[j].stat_offset;
+			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+		}
+	}
+	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+		p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+		data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+		data[i++] = pf->stats.priority_xon_tx[j];
+		data[i++] = pf->stats.priority_xoff_tx[j];
+	}
+	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+		data[i++] = pf->stats.priority_xon_rx[j];
+		data[i++] = pf->stats.priority_xoff_rx[j];
+	}
+	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+		data[i++] = pf->stats.priority_xon_2_xoff[j];
 }
 
 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
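The per-ring loop above relies on the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() sequence-counter protocol so 64-bit packet and byte counters read consistently even on 32-bit hosts. Below is a userspace analogue of that retry loop; the struct and helper names are hypothetical stand-ins, and the demo runs single-threaded purely to show the control flow (real concurrent readers additionally depend on the kernel seqcount's memory ordering).

/* Toy snapshot-with-retry pattern (illustrative, not kernel API) */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	_Atomic unsigned int seq;	/* odd = update in progress */
	uint64_t packets, bytes;
};

static void writer_update(struct ring_stats *s, uint64_t p, uint64_t b)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->packets += p;
	s->bytes += b;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void reader_snapshot(const struct ring_stats *s,
			    uint64_t *p, uint64_t *b)
{
	unsigned int start;
	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*p = s->packets;
		*b = s->bytes;
		/* retry if a write was in flight or happened meanwhile */
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct ring_stats s = { 0 };
	uint64_t p, b;

	writer_update(&s, 10, 1500);
	reader_snapshot(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}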
@@ -713,6 +773,11 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
 				 i40e_gstrings_net_stats[i].stat_string);
 			p += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "%s",
+				 i40e_gstrings_misc_stats[i].stat_string);
+			p += ETH_GSTRING_LEN;
+		}
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
 			p += ETH_GSTRING_LEN;
@@ -723,34 +788,42 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
723 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); 788 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
724 p += ETH_GSTRING_LEN; 789 p += ETH_GSTRING_LEN;
725 } 790 }
726 if (vsi == pf->vsi[pf->lan_vsi]) { 791 if (vsi != pf->vsi[pf->lan_vsi])
727 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { 792 return;
728 snprintf(p, ETH_GSTRING_LEN, "port.%s", 793
729 i40e_gstrings_stats[i].stat_string); 794 if (pf->lan_veb != I40E_NO_VEB) {
730 p += ETH_GSTRING_LEN; 795 for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
731 } 796 snprintf(p, ETH_GSTRING_LEN, "veb.%s",
732 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 797 i40e_gstrings_veb_stats[i].stat_string);
733 snprintf(p, ETH_GSTRING_LEN,
734 "port.tx_priority_%u_xon", i);
735 p += ETH_GSTRING_LEN;
736 snprintf(p, ETH_GSTRING_LEN,
737 "port.tx_priority_%u_xoff", i);
738 p += ETH_GSTRING_LEN;
739 }
740 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
741 snprintf(p, ETH_GSTRING_LEN,
742 "port.rx_priority_%u_xon", i);
743 p += ETH_GSTRING_LEN;
744 snprintf(p, ETH_GSTRING_LEN,
745 "port.rx_priority_%u_xoff", i);
746 p += ETH_GSTRING_LEN;
747 }
748 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
749 snprintf(p, ETH_GSTRING_LEN,
750 "port.rx_priority_%u_xon_2_xoff", i);
751 p += ETH_GSTRING_LEN; 798 p += ETH_GSTRING_LEN;
752 } 799 }
753 } 800 }
801 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
802 snprintf(p, ETH_GSTRING_LEN, "port.%s",
803 i40e_gstrings_stats[i].stat_string);
804 p += ETH_GSTRING_LEN;
805 }
806 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
807 snprintf(p, ETH_GSTRING_LEN,
808 "port.tx_priority_%u_xon", i);
809 p += ETH_GSTRING_LEN;
810 snprintf(p, ETH_GSTRING_LEN,
811 "port.tx_priority_%u_xoff", i);
812 p += ETH_GSTRING_LEN;
813 }
814 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
815 snprintf(p, ETH_GSTRING_LEN,
816 "port.rx_priority_%u_xon", i);
817 p += ETH_GSTRING_LEN;
818 snprintf(p, ETH_GSTRING_LEN,
819 "port.rx_priority_%u_xoff", i);
820 p += ETH_GSTRING_LEN;
821 }
822 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
823 snprintf(p, ETH_GSTRING_LEN,
824 "port.rx_priority_%u_xon_2_xoff", i);
825 p += ETH_GSTRING_LEN;
826 }
754 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ 827 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
755 break; 828 break;
756 } 829 }
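These three hunks (get_sset_count, get_ethtool_stats, get_strings) must stay in lock-step: the reported count, the value array, and the string array all walk the same tables in the same order, now including the VEB and misc VSI stats. The toy below illustrates that contract with one shared table driving both names and values; every name in it is hypothetical.

/* Illustrative check of the ethtool strings/data ordering contract */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_stats { uint64_t rx_bytes, tx_bytes, rx_dropped; };

struct stat_def { const char *name; size_t offset; };

static const struct stat_def defs[] = {
	{ "rx_bytes",   offsetof(struct toy_stats, rx_bytes) },
	{ "tx_bytes",   offsetof(struct toy_stats, tx_bytes) },
	{ "rx_dropped", offsetof(struct toy_stats, rx_dropped) },
};
#define NDEFS (sizeof(defs) / sizeof(defs[0]))

static size_t sset_count(void) { return NDEFS; }	/* ETH_SS_STATS analogue */

int main(void)
{
	struct toy_stats st = { 1000, 2000, 3 };
	const char *names[NDEFS];
	uint64_t data[NDEFS];

	/* strings and data come from one table, so order cannot diverge */
	for (size_t i = 0; i < NDEFS; i++) {
		names[i] = defs[i].name;
		data[i] = *(const uint64_t *)((const char *)&st + defs[i].offset);
	}
	assert(sset_count() == NDEFS);
	for (size_t i = 0; i < NDEFS; i++)
		printf("%-12s %llu\n", names[i], (unsigned long long)data[i]);
	return 0;
}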
@@ -1007,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
 	ec->rx_max_coalesced_frames_irq = vsi->work_limit;
 
 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
-		ec->rx_coalesce_usecs = 1;
-	else
-		ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+		ec->use_adaptive_rx_coalesce = 1;
 
 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		ec->tx_coalesce_usecs = 1;
-	else
-		ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+		ec->use_adaptive_tx_coalesce = 1;
+
+	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
 
 	return 0;
 }
@@ -1033,37 +1105,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
 	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
 		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
-	switch (ec->rx_coalesce_usecs) {
-	case 0:
-		vsi->rx_itr_setting = 0;
-		break;
-	case 1:
-		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-		break;
-	default:
-		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-			return -EINVAL;
+	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
 		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-		break;
-	}
+	else
+		return -EINVAL;
 
-	switch (ec->tx_coalesce_usecs) {
-	case 0:
-		vsi->tx_itr_setting = 0;
-		break;
-	case 1:
-		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-		break;
-	default:
-		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-			return -EINVAL;
+	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
-		break;
-	}
+	else
+		return -EINVAL;
+
+	if (ec->use_adaptive_rx_coalesce)
+		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+	else
+		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+	if (ec->use_adaptive_tx_coalesce)
+		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+	else
+		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
 	vector = vsi->base_vector;
 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
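The rewritten coalesce handlers decouple the ITR microsecond value from adaptive mode: ethtool's use_adaptive_rx/tx_coalesce now maps to an I40E_ITR_DYNAMIC flag OR'd into the stored setting, replacing the old magic rx/tx_coalesce_usecs values 0 and 1. A small sketch of that encoding follows; the constant values are illustrative stand-ins, not necessarily the driver's.

/* Toy ITR encoding: usecs in the low bits, adaptive mode as a flag bit */
#include <stdint.h>
#include <stdio.h>

#define ITR_DYNAMIC 0x8000u	/* assumed flag bit outside the usecs range */
#define ITR_MIN     2u
#define ITR_MAX     8160u

static int set_itr(uint16_t *setting, unsigned int usecs, int adaptive)
{
	if (usecs < ITR_MIN || usecs > ITR_MAX)
		return -1;			/* -EINVAL in the driver */
	*setting = (uint16_t)usecs;
	if (adaptive)
		*setting |= ITR_DYNAMIC;	/* keep usecs, mark adaptive */
	return 0;
}

int main(void)
{
	uint16_t rx_itr = 0;

	if (set_itr(&rx_itr, 50, 1) == 0)
		printf("usecs=%u adaptive=%d\n",
		       rx_itr & ~ITR_DYNAMIC, !!(rx_itr & ITR_DYNAMIC));
	return 0;
}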
@@ -1140,8 +1202,7 @@ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
 	int cnt = 0;
 
 	/* report total rule count */
-	cmd->data = pf->hw.fdir_shared_filter_count +
-		    pf->fdir_pf_filter_count;
+	cmd->data = i40e_get_fd_cnt_all(pf);
 
 	hlist_for_each_entry_safe(rule, node2,
 				  &pf->fdir_filter_list, fdir_node) {
@@ -1175,10 +1236,6 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 	struct i40e_fdir_filter *rule = NULL;
 	struct hlist_node *node2;
 
-	/* report total rule count */
-	cmd->data = pf->hw.fdir_shared_filter_count +
-		    pf->fdir_pf_filter_count;
-
 	hlist_for_each_entry_safe(rule, node2,
 				  &pf->fdir_filter_list, fdir_node) {
 		if (fsp->location <= rule->fd_id)
@@ -1189,11 +1246,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 		return -EINVAL;
 
 	fsp->flow_type = rule->flow_type;
-	fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
-	fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
-	fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
-	fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
-	fsp->ring_cookie = rule->q_index;
+	if (fsp->flow_type == IP_USER_FLOW) {
+		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+		fsp->h_u.usr_ip4_spec.proto = 0;
+		fsp->m_u.usr_ip4_spec.proto = 0;
+	}
+
+	/* Reverse the src and dest notion, since the HW views them from
+	 * Tx perspective whereas the user expects it from Rx filter view.
+	 */
+	fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+	fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+	fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
+	fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+
+	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
+	else
+		fsp->ring_cookie = rule->q_index;
 
 	return 0;
 }
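Both this report path and the add path later in the file swap source and destination, because the hardware programs flow director filters from a Tx viewpoint while ethtool users describe flows as received. A toy round-trip of that swap, with simplified types standing in for the kernel structures:

/* Illustrative Rx-view <-> Tx-view tuple swap (hypothetical types) */
#include <stdint.h>
#include <stdio.h>

struct tuple { uint32_t src_ip, dst_ip; uint16_t src_port, dst_port; };

/* user (Rx view) -> hardware (Tx view); the same swap undoes itself */
static struct tuple rx_to_tx(struct tuple rx)
{
	struct tuple tx = {
		.src_ip = rx.dst_ip, .dst_ip = rx.src_ip,
		.src_port = rx.dst_port, .dst_port = rx.src_port,
	};
	return tx;
}

int main(void)
{
	struct tuple rx = { 0x0a000001, 0x0a000002, 1234, 80 };
	struct tuple tx = rx_to_tx(rx);		/* programmed into HW */
	struct tuple back = rx_to_tx(tx);	/* reported back to user */

	printf("round-trip ok: %d\n",
	       back.src_ip == rx.src_ip && back.dst_port == rx.dst_port);
	return 0;
}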
@@ -1223,6 +1293,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 		break;
 	case ETHTOOL_GRXCLSRLCNT:
 		cmd->rule_cnt = pf->fdir_pf_active_filters;
+		/* report total rule count */
+		cmd->data = i40e_get_fd_cnt_all(pf);
 		ret = 0;
 		break;
 	case ETHTOOL_GRXCLSRULE:
@@ -1291,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &=
-			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |=
-			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		default:
 			return -EINVAL;
@@ -1309,16 +1377,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &=
-			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |=
-			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		default:
 			return -EINVAL;
@@ -1503,7 +1567,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 		return -EINVAL;
 	}
 
-	if (fsp->ring_cookie >= vsi->num_queue_pairs)
+	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+	    (fsp->ring_cookie >= vsi->num_queue_pairs))
 		return -EINVAL;
 
 	input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -1524,13 +1589,17 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	input->pctype = 0;
 	input->dest_vsi = vsi->id;
 	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-	input->cnt_index = 0;
+	input->cnt_index = pf->fd_sb_cnt_idx;
 	input->flow_type = fsp->flow_type;
 	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
-	input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
-	input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
-	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
-	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+	/* Reverse the src and dest notion, since the HW expects them to be
+	 * from Tx perspective whereas the input from user is from Rx filter
+	 * view.
+	 */
+	input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+	input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
 	ret = i40e_add_del_fdir(vsi, input, true);
 	if (ret)
@@ -1692,5 +1761,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+	netdev->ethtool_ops = &i40e_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index bf2d4cc5b569..9b987ccc9e82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -201,7 +201,7 @@ exit:
  **/
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 			      struct i40e_hmc_info *hmc_info,
-			      u32 idx, bool is_pf)
+			      u32 idx)
 {
 	i40e_status ret_code = 0;
 	struct i40e_hmc_pd_entry *pd_entry;
@@ -237,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 	pd_addr = (u64 *)pd_table->pd_page_addr.va;
 	pd_addr += rel_pd_idx;
 	memset(pd_addr, 0, sizeof(u64));
-	if (is_pf)
-		I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
-	else
-		I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
 	/* free memory here */
 	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 0cd4701234f8..b45d8fedc5e7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
 		(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
 		 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
 
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
-	wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
-		(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
-		 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
 /**
  * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
  * @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					u32 pd_index);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 			      struct i40e_hmc_info *hmc_info,
-			      u32 idx, bool is_pf);
+			      u32 idx);
 i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
 				   u32 idx);
 i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe2691d..870ab1ee072c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -397,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
 				/* remove the backing pages from pd_idx1 to i */
 				while (i && (i > pd_idx1)) {
 					i40e_remove_pd_bp(hw, info->hmc_info,
-							  (i - 1), true);
+							  (i - 1));
 					i--;
 				}
 			}
@@ -433,11 +433,7 @@ exit_sd_error:
 					((j - 1) * I40E_HMC_MAX_BP_COUNT));
 			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
 			for (i = pd_idx1; i < pd_lmt1; i++) {
-				i40e_remove_pd_bp(
-					hw,
-					info->hmc_info,
-					i,
-					true);
+				i40e_remove_pd_bp(hw, info->hmc_info, i);
 			}
 			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
 			break;
@@ -616,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
 			pd_table =
 				&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
 			if (pd_table->pd_entry[rel_pd_idx].valid) {
-				ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
-							     j, true);
+				ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
 				if (ret_code)
 					goto exit;
 			}
@@ -747,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
 	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
 	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
 	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
 	{ 0 }
 };
 
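The new prefena entry extends the { field, width, lsb } layout table that maps struct i40e_hmc_obj_rxq members onto bit positions in the packed hardware queue context. A self-contained toy of that packing scheme is below; the struct, table values, and packer are illustrative only (the driver uses its own context-write helpers).

/* Toy bit-layout table and packer (hypothetical struct and values) */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rxq_ctx { uint32_t lrxqthresh, prefena; };

struct ctx_ele { size_t offset; unsigned int width, lsb; };

static const struct ctx_ele rxq_ce_info[] = {
	{ offsetof(struct rxq_ctx, lrxqthresh), 3, 198 },
	{ offsetof(struct rxq_ctx, prefena),    1, 201 },
	{ 0, 0, 0 }
};

/* OR each field's low 'width' bits into the buffer starting at bit 'lsb' */
static void pack_ctx(uint8_t *buf, const struct rxq_ctx *ctx)
{
	for (unsigned int i = 0; rxq_ce_info[i].width; i++) {
		uint32_t v = *(const uint32_t *)((const char *)ctx +
						 rxq_ce_info[i].offset);
		v &= (1u << rxq_ce_info[i].width) - 1;
		for (unsigned int b = 0; b < rxq_ce_info[i].width; b++) {
			unsigned int bit = rxq_ce_info[i].lsb + b;
			if (v & (1u << b))
				buf[bit / 8] |= 1u << (bit % 8);
		}
	}
}

int main(void)
{
	uint8_t buf[32] = { 0 };	/* 256-bit context, bit 201 fits */
	struct rxq_ctx ctx = { .lrxqthresh = 5, .prefena = 1 };

	pack_ctx(buf, &ctx);
	/* bits 198..201 straddle bytes 24 and 25 */
	printf("bytes 24,25 = 0x%02x 0x%02x\n", buf[24], buf[25]);
	return 0;
}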
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de925a298..eb65fe23c4a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
 	u8  tphdata_ena;
 	u8  tphhead_ena;
 	u8  lrxqthresh;
+	u8  prefena;	/* NOTE: normally must be set to 1 at init */
 };
 
 /* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e72449f1265..275ca9a1719e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 36
+#define DRV_VERSION_MINOR 4
+#define DRV_VERSION_BUILD 10
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -67,12 +67,10 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
  */
 static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
@@ -356,6 +354,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 					     struct rtnl_link_stats64 *stats)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_ring *tx_ring, *rx_ring;
 	struct i40e_vsi *vsi = np->vsi;
 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
 	int i;
@@ -368,7 +367,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 
 	rcu_read_lock();
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		struct i40e_ring *tx_ring, *rx_ring;
 		u64 bytes, packets;
 		unsigned int start;
 
@@ -397,7 +395,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 	}
 	rcu_read_unlock();
 
-	/* following stats updated by ixgbe_watchdog_task() */
+	/* following stats updated by i40e_watchdog_subtask() */
 	stats->multicast	= vsi_stats->multicast;
 	stats->tx_errors	= vsi_stats->tx_errors;
 	stats->tx_dropped	= vsi_stats->tx_dropped;
@@ -530,6 +528,12 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
 	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
 			   vsi->stat_offsets_loaded,
 			   &oes->rx_discards, &es->rx_discards);
+	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_errors, &es->tx_errors);
 
 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
 			   I40E_GLV_GORCL(stat_idx),
@@ -648,10 +652,10 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
 		return;
 
 	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		struct i40e_vsi *vsi = pf->vsi[v];
 
-		if (!vsi)
+		if (!vsi || !vsi->tx_rings[0])
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -702,10 +706,10 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 	}
 
 	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		struct i40e_vsi *vsi = pf->vsi[v];
 
-		if (!vsi)
+		if (!vsi || !vsi->tx_rings[0])
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -720,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 }
 
 /**
- * i40e_update_stats - Update the board statistics counters.
+ * i40e_update_vsi_stats - Update the vsi statistics counters.
  * @vsi: the VSI to be updated
  *
  * There are a few instances where we store the same stat in a
  * couple of different structs.  This is partly because we have
  * the netdev stats that need to be filled out, which is slightly
  * different from the "eth_stats" defined by the chip and used in
- * VF communications.  We sort it all out here in a central place.
+ * VF communications.  We sort it out here.
 **/
-void i40e_update_stats(struct i40e_vsi *vsi)
+static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
 	struct rtnl_link_stats64 *ons;
 	struct rtnl_link_stats64 *ns;   /* netdev stats */
 	struct i40e_eth_stats *oes;
@@ -741,8 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 	u32 rx_page, rx_buf;
 	u64 rx_p, rx_b;
 	u64 tx_p, tx_b;
-	u32 val;
-	int i;
 	u16 q;
 
 	if (test_bit(__I40E_DOWN, &vsi->state) ||
@@ -804,196 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 	ns->tx_packets = tx_p;
 	ns->tx_bytes = tx_b;
 
-	i40e_update_eth_stats(vsi);
 	/* update netdev stats from eth stats */
-	ons->rx_errors = oes->rx_errors;
-	ns->rx_errors = es->rx_errors;
+	i40e_update_eth_stats(vsi);
 	ons->tx_errors = oes->tx_errors;
 	ns->tx_errors = es->tx_errors;
 	ons->multicast = oes->rx_multicast;
 	ns->multicast = es->rx_multicast;
+	ons->rx_dropped = oes->rx_discards;
+	ns->rx_dropped = es->rx_discards;
 	ons->tx_dropped = oes->tx_discards;
 	ns->tx_dropped = es->tx_discards;
 
-	/* Get the port data only if this is the main PF VSI */
+	/* pull in a couple PF stats if this is the main vsi */
 	if (vsi == pf->vsi[pf->lan_vsi]) {
-		struct i40e_hw_port_stats *nsd = &pf->stats;
-		struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+		ns->rx_crc_errors = pf->stats.crc_errors;
+		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
+		ns->rx_length_errors = pf->stats.rx_length_errors;
+	}
+}
 
-		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
-				   I40E_GLPRT_GORCL(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
-		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
-				   I40E_GLPRT_GOTCL(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
-		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->eth.rx_discards,
-				   &nsd->eth.rx_discards);
-		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->eth.tx_discards,
-				   &nsd->eth.tx_discards);
-		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
-				   I40E_GLPRT_MPRCL(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->eth.rx_multicast,
-				   &nsd->eth.rx_multicast);
+/**
+ * i40e_update_pf_stats - Update the pf statistics counters.
+ * @pf: the PF to be updated
+ **/
+static void i40e_update_pf_stats(struct i40e_pf *pf)
+{
+	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+	struct i40e_hw_port_stats *nsd = &pf->stats;
+	struct i40e_hw *hw = &pf->hw;
+	u32 val;
+	int i;
 
-		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_dropped_link_down,
-				   &nsd->tx_dropped_link_down);
+	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+			   I40E_GLPRT_GORCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+			   I40E_GLPRT_GOTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_discards,
+			   &nsd->eth.rx_discards);
+	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_discards,
+			   &nsd->eth.tx_discards);
 
-		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->crc_errors, &nsd->crc_errors);
-		ns->rx_crc_errors = nsd->crc_errors;
+	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+			   I40E_GLPRT_UPRCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_unicast,
+			   &nsd->eth.rx_unicast);
+	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+			   I40E_GLPRT_MPRCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_multicast,
+			   &nsd->eth.rx_multicast);
+	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+			   I40E_GLPRT_BPRCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_broadcast,
+			   &nsd->eth.rx_broadcast);
+	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+			   I40E_GLPRT_UPTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_unicast,
+			   &nsd->eth.tx_unicast);
+	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+			   I40E_GLPRT_MPTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_multicast,
+			   &nsd->eth.tx_multicast);
+	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+			   I40E_GLPRT_BPTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_broadcast,
+			   &nsd->eth.tx_broadcast);
 
-		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->illegal_bytes, &nsd->illegal_bytes);
-		ns->rx_errors = nsd->crc_errors
-				+ nsd->illegal_bytes;
+	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_dropped_link_down,
+			   &nsd->tx_dropped_link_down);
 
-		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->mac_local_faults,
-				   &nsd->mac_local_faults);
-		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->mac_remote_faults,
-				   &nsd->mac_remote_faults);
+	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->crc_errors, &nsd->crc_errors);
 
-		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_length_errors,
-				   &nsd->rx_length_errors);
-		ns->rx_length_errors = nsd->rx_length_errors;
+	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->illegal_bytes, &nsd->illegal_bytes);
 
-		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->link_xon_rx, &nsd->link_xon_rx);
-		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->link_xon_tx, &nsd->link_xon_tx);
-		i40e_update_prio_xoff_rx(pf);	/* handles I40E_GLPRT_LXOFFRXC */
-		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->link_xoff_tx, &nsd->link_xoff_tx);
-
-		for (i = 0; i < 8; i++) {
-			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
-					   pf->stat_offsets_loaded,
-					   &osd->priority_xon_rx[i],
-					   &nsd->priority_xon_rx[i]);
-			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
-					   pf->stat_offsets_loaded,
-					   &osd->priority_xon_tx[i],
-					   &nsd->priority_xon_tx[i]);
-			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
-					   pf->stat_offsets_loaded,
-					   &osd->priority_xoff_tx[i],
-					   &nsd->priority_xoff_tx[i]);
-			i40e_stat_update32(hw,
-					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
-					   pf->stat_offsets_loaded,
-					   &osd->priority_xon_2_xoff[i],
-					   &nsd->priority_xon_2_xoff[i]);
-		}
+	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->mac_local_faults,
+			   &nsd->mac_local_faults);
+	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->mac_remote_faults,
+			   &nsd->mac_remote_faults);
 
-		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
-				   I40E_GLPRT_PRC64L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_size_64, &nsd->rx_size_64);
-		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
-				   I40E_GLPRT_PRC127L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_size_127, &nsd->rx_size_127);
-		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
-				   I40E_GLPRT_PRC255L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_size_255, &nsd->rx_size_255);
-		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
-				   I40E_GLPRT_PRC511L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_size_511, &nsd->rx_size_511);
-		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
-				   I40E_GLPRT_PRC1023L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_size_1023, &nsd->rx_size_1023);
-		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
-				   I40E_GLPRT_PRC1522L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_size_1522, &nsd->rx_size_1522);
-		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
-				   I40E_GLPRT_PRC9522L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_size_big, &nsd->rx_size_big);
+	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_length_errors,
+			   &nsd->rx_length_errors);
 
-		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
-				   I40E_GLPRT_PTC64L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_size_64, &nsd->tx_size_64);
-		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
-				   I40E_GLPRT_PTC127L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_size_127, &nsd->tx_size_127);
-		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
-				   I40E_GLPRT_PTC255L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_size_255, &nsd->tx_size_255);
-		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
-				   I40E_GLPRT_PTC511L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_size_511, &nsd->tx_size_511);
-		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
-				   I40E_GLPRT_PTC1023L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_size_1023, &nsd->tx_size_1023);
-		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
-				   I40E_GLPRT_PTC1522L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_size_1522, &nsd->tx_size_1522);
-		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
-				   I40E_GLPRT_PTC9522L(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->tx_size_big, &nsd->tx_size_big);
+	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xon_rx, &nsd->link_xon_rx);
+	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xon_tx, &nsd->link_xon_tx);
+	i40e_update_prio_xoff_rx(pf);	/* handles I40E_GLPRT_LXOFFRXC */
+	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
 
-		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_undersize, &nsd->rx_undersize);
-		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_fragments, &nsd->rx_fragments);
-		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_oversize, &nsd->rx_oversize);
-		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
-				   pf->stat_offsets_loaded,
-				   &osd->rx_jabber, &nsd->rx_jabber);
-
-		val = rd32(hw, I40E_PRTPM_EEE_STAT);
-		nsd->tx_lpi_status =
-			       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
-			       I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
-		nsd->rx_lpi_status =
-			       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
-			       I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
-		i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
-				   pf->stat_offsets_loaded,
-				   &osd->tx_lpi_count, &nsd->tx_lpi_count);
-		i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
-				   pf->stat_offsets_loaded,
-				   &osd->rx_lpi_count, &nsd->rx_lpi_count);
+	for (i = 0; i < 8; i++) {
+		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+				   pf->stat_offsets_loaded,
+				   &osd->priority_xon_rx[i],
+				   &nsd->priority_xon_rx[i]);
+		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+				   pf->stat_offsets_loaded,
+				   &osd->priority_xon_tx[i],
+				   &nsd->priority_xon_tx[i]);
+		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+				   pf->stat_offsets_loaded,
+				   &osd->priority_xoff_tx[i],
+				   &nsd->priority_xoff_tx[i]);
+		i40e_stat_update32(hw,
+				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+				   pf->stat_offsets_loaded,
+				   &osd->priority_xon_2_xoff[i],
+				   &nsd->priority_xon_2_xoff[i]);
 	}
 
+	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+			   I40E_GLPRT_PRC64L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_64, &nsd->rx_size_64);
+	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+			   I40E_GLPRT_PRC127L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_127, &nsd->rx_size_127);
+	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+			   I40E_GLPRT_PRC255L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_255, &nsd->rx_size_255);
+	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+			   I40E_GLPRT_PRC511L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_511, &nsd->rx_size_511);
+	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+			   I40E_GLPRT_PRC1023L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_1023, &nsd->rx_size_1023);
+	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+			   I40E_GLPRT_PRC1522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_1522, &nsd->rx_size_1522);
+	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+			   I40E_GLPRT_PRC9522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_big, &nsd->rx_size_big);
+
+	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+			   I40E_GLPRT_PTC64L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_64, &nsd->tx_size_64);
+	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+			   I40E_GLPRT_PTC127L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_127, &nsd->tx_size_127);
+	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+			   I40E_GLPRT_PTC255L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_255, &nsd->tx_size_255);
+	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+			   I40E_GLPRT_PTC511L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_511, &nsd->tx_size_511);
+	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+			   I40E_GLPRT_PTC1023L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_1023, &nsd->tx_size_1023);
+	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+			   I40E_GLPRT_PTC1522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_1522, &nsd->tx_size_1522);
+	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+			   I40E_GLPRT_PTC9522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_big, &nsd->tx_size_big);
+
+	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_undersize, &nsd->rx_undersize);
+	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_fragments, &nsd->rx_fragments);
1009 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1010 pf->stat_offsets_loaded,
1011 &osd->rx_oversize, &nsd->rx_oversize);
1012 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1013 pf->stat_offsets_loaded,
1014 &osd->rx_jabber, &nsd->rx_jabber);
1015
1016 /* FDIR stats */
1017 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
1018 pf->stat_offsets_loaded,
1019 &osd->fd_atr_match, &nsd->fd_atr_match);
1020 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
1021 pf->stat_offsets_loaded,
1022 &osd->fd_sb_match, &nsd->fd_sb_match);
1023
1024 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1025 nsd->tx_lpi_status =
1026 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1027 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1028 nsd->rx_lpi_status =
1029 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1030 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1031 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1032 pf->stat_offsets_loaded,
1033 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1034 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1035 pf->stat_offsets_loaded,
1036 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1037
993 pf->stat_offsets_loaded = true; 1038 pf->stat_offsets_loaded = true;
994} 1039}
995 1040
996/** 1041/**
1042 * i40e_update_stats - Update the various statistics counters.
1043 * @vsi: the VSI to be updated
1044 *
1045 * Update the various stats for this VSI and its related entities.
1046 **/
1047void i40e_update_stats(struct i40e_vsi *vsi)
1048{
1049 struct i40e_pf *pf = vsi->back;
1050
1051 if (vsi == pf->vsi[pf->lan_vsi])
1052 i40e_update_pf_stats(pf);
1053
1054 i40e_update_vsi_stats(vsi);
1055}
1056
1057/**
997 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter 1058 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
998 * @vsi: the VSI to be searched 1059 * @vsi: the VSI to be searched
999 * @macaddr: the MAC address 1060 * @macaddr: the MAC address
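Both halves of the refactored stats path above rely on the same offset-relative counter idiom: the first read after reset seeds a per-counter baseline, and each later read reports the distance from it, with wrap-around handled for counters narrower than 64 bits. A minimal sketch of that pattern follows (the real helpers are i40e_stat_update32()/i40e_stat_update48() elsewhere in the driver; this simplified version is only an illustration):

static void stat_update32_sketch(u32 new_data, bool offset_loaded,
				 u64 *offset, u64 *stat)
{
	if (!offset_loaded)
		*offset = new_data;	/* first read seeds the baseline */
	if (new_data >= *offset)
		*stat = (u64)new_data - *offset;
	else
		/* the 32-bit counter wrapped since the baseline was taken */
		*stat = ((u64)new_data + BIT_ULL(32)) - *offset;
}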
@@ -1101,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 }
 
 /**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+	struct i40e_aqc_remove_macvlan_element_data element;
+	struct i40e_pf *pf = vsi->back;
+	i40e_status aq_ret;
+
+	/* Only appropriate for the PF main VSI */
+	if (vsi->type != I40E_VSI_MAIN)
+		return;
+
+	ether_addr_copy(element.mac_addr, macaddr);
+	element.vlan_tag = 0;
+	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+	if (aq_ret)
+		dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+}
+
+/**
  * i40e_add_filter - Add a mac/vlan filter to the VSI
  * @vsi: the VSI to be searched
  * @macaddr: the MAC address
@@ -1125,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 	if (!f)
 		goto add_filter_out;
 
-	memcpy(f->macaddr, macaddr, ETH_ALEN);
+	ether_addr_copy(f->macaddr, macaddr);
 	f->vlan = vlan;
 	f->changed = true;
 
@@ -1249,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 			return -EADDRNOTAVAIL;
 		}
 
-		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+		ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
 	}
 
 	/* In order to be sure to not drop any packets, add the new address
@@ -1263,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
 	i40e_sync_vsi_filters(vsi);
 
-	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
 	return 0;
 }
@@ -1313,7 +1398,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 	vsi->tc_config.numtc = numtc;
 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
 	/* Number of queues per enabled TC */
-	num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+	num_tc_qps = vsi->alloc_queue_pairs/numtc;
 	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
 
 	/* Setup queue offset/count for all TCs for given VSI */
@@ -1520,8 +1605,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			cmd_flags = 0;
 
 			/* add to delete list */
-			memcpy(del_list[num_del].mac_addr,
-			       f->macaddr, ETH_ALEN);
+			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
 			del_list[num_del].vlan_tag =
 				cpu_to_le16((u16)(f->vlan ==
 					    I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1542,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				num_del = 0;
 				memset(del_list, 0, sizeof(*del_list));
 
-				if (aq_ret)
+				if (aq_ret &&
+				    pf->hw.aq.asq_last_status !=
+				    I40E_AQ_RC_ENOENT)
 					dev_info(&pf->pdev->dev,
 						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
 						 aq_ret,
@@ -1554,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 						     del_list, num_del, NULL);
 			num_del = 0;
 
-			if (aq_ret)
+			if (aq_ret &&
+			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
 				dev_info(&pf->pdev->dev,
 					 "ignoring delete macvlan error, err %d, aq_err %d\n",
 					 aq_ret, pf->hw.aq.asq_last_status);
@@ -1583,8 +1670,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			cmd_flags = 0;
 
 			/* add to add array */
-			memcpy(add_list[num_add].mac_addr,
-			       f->macaddr, ETH_ALEN);
+			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
 			add_list[num_add].vlan_tag =
 				cpu_to_le16(
 				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1681,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 		return;
 	pf->flags &= ~I40E_FLAG_FILTER_SYNC;
 
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] &&
 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
 			i40e_sync_vsi_filters(pf->vsi[v]);
@@ -1698,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 	struct i40e_vsi *vsi = np->vsi;
 
 	/* MTU < 68 is an error and causes problems on some kernels */
@@ -2312,6 +2398,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	rx_ctx.crcstrip = 1;
 	rx_ctx.l2tsel = 1;
 	rx_ctx.showiv = 1;
+	/* set the prefena field to 1 because the manual says to */
+	rx_ctx.prefena = 1;
 
 	/* clear the context in the HMC */
 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2413,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
  **/
 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
 {
+	struct i40e_ring *tx_ring, *rx_ring;
 	u16 qoffset, qcount;
 	int i, n;
 
@@ -2426,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
 		qoffset = vsi->tc_config.tc_info[n].qoffset;
 		qcount = vsi->tc_config.tc_info[n].qcount;
 		for (i = qoffset; i < (qoffset + qcount); i++) {
-			struct i40e_ring *rx_ring = vsi->rx_rings[i];
-			struct i40e_ring *tx_ring = vsi->tx_rings[i];
+			rx_ring = vsi->rx_rings[i];
+			tx_ring = vsi->tx_rings[i];
 			rx_ring->dcb_tc = n;
 			tx_ring->dcb_tc = n;
 		}
@@ -2565,7 +2654,6 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
 	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
 	      I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
-	      I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
 	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2733,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 				      &q_vector->affinity_mask);
 	}
 
+	vsi->irqs_ready = true;
 	return 0;
 
 free_queue_irqs:
@@ -3152,6 +3241,12 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 
 	pf_q = vsi->base_queue;
 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+
+		/* warn the TX unit of coming changes */
+		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+		if (!enable)
+			udelay(10);
+
 		for (j = 0; j < 50; j++) {
 			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
 			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
@@ -3160,9 +3255,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 			usleep_range(1000, 2000);
 		}
 		/* Skip if the queue is already in the requested state */
-		if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-			continue;
-		if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
 			continue;
 
 		/* turn on/off the queue */
@@ -3178,13 +3271,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 		/* wait for the change to finish */
 		for (j = 0; j < 10; j++) {
 			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-			if (enable) {
-				if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-					break;
-			} else {
-				if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-					break;
-			}
+			if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+				break;
 
 			udelay(10);
 		}
@@ -3223,15 +3311,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 			usleep_range(1000, 2000);
 		}
 
-		if (enable) {
-			/* is STAT set ? */
-			if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-				continue;
-		} else {
-			/* is !STAT set ? */
-			if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-				continue;
-		}
+		/* Skip if the queue is already in the requested state */
+		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+			continue;
 
 		/* turn on/off the queue */
 		if (enable)
@@ -3244,13 +3326,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 		for (j = 0; j < 10; j++) {
 			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
 
-			if (enable) {
-				if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-					break;
-			} else {
-				if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-					break;
-			}
+			if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+				break;
 
 			udelay(10);
 		}
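The rewritten Tx and Rx queue checks above all collapse four-way if/else ladders into one comparison: `!!` normalizes the masked status bit to 0 or 1 so it can be tested directly against the boolean request. A self-contained sketch of the idiom (the names here are illustrative, not driver symbols):

#include <stdbool.h>
#include <stdint.h>

/* True when the queue is already in the requested state. */
static bool queue_state_matches(bool enable, uint32_t reg, uint32_t stat_mask)
{
	return enable == !!(reg & stat_mask);
}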
@@ -3304,6 +3381,10 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 	if (!vsi->q_vectors)
 		return;
 
+	if (!vsi->irqs_ready)
+		return;
+
+	vsi->irqs_ready = false;
 	for (i = 0; i < vsi->num_q_vectors; i++) {
 		u16 vector = i + base;
 
@@ -3476,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 	int i;
 
 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i])
 			i40e_vsi_free_q_vectors(pf->vsi[i]);
 	i40e_reset_interrupt_capability(pf);
@@ -3513,6 +3594,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+		i40e_down(vsi);
+	i40e_vsi_free_irq(vsi);
+	i40e_vsi_free_tx_resources(vsi);
+	i40e_vsi_free_rx_resources(vsi);
+}
+
+/**
  * i40e_quiesce_vsi - Pause a given VSI
  * @vsi: the VSI being paused
  **/
@@ -3525,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
 	if (vsi->netdev && netif_running(vsi->netdev)) {
 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
 	} else {
-		set_bit(__I40E_DOWN, &vsi->state);
-		i40e_down(vsi);
+		i40e_vsi_close(vsi);
 	}
 }
 
@@ -3543,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
 	if (vsi->netdev && netif_running(vsi->netdev))
 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
 	else
-		i40e_up(vsi);   /* this clears the DOWN bit */
+		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
 }
 
 /**
@@ -3554,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
 {
 	int v;
 
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v])
 			i40e_quiesce_vsi(pf->vsi[v]);
 	}
@@ -3568,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
 {
 	int v;
 
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v])
 			i40e_unquiesce_vsi(pf->vsi[v]);
 	}
@@ -4009,7 +4102,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
 	}
 
 	/* Update each VSI */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (!pf->vsi[v])
 			continue;
 
@@ -4028,6 +4121,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
 				 pf->vsi[v]->seid);
 			/* Will try to configure as many components */
 		} else {
+			/* Re-configure VSI vectors based on updated TC map */
+			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
 			if (pf->vsi[v]->netdev)
 				i40e_dcbnl_set_all(pf->vsi[v]);
 		}
@@ -4065,14 +4160,69 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 			/* When status is not DISABLED then DCBX in FW */
 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
 				       DCB_CAP_DCBX_VER_IEEE;
-			pf->flags |= I40E_FLAG_DCB_ENABLED;
+
+			pf->flags |= I40E_FLAG_DCB_CAPABLE;
+			/* Enable DCB tagging only when more than one TC */
+			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+				pf->flags |= I40E_FLAG_DCB_ENABLED;
 		}
+	} else {
+		dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+			 pf->hw.aq.asq_last_status);
 	}
 
 out:
 	return err;
 }
 #endif /* CONFIG_I40E_DCB */
+#define SPEED_SIZE 14
+#define FC_SIZE 8
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ */
+static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+	char speed[SPEED_SIZE] = "Unknown";
+	char fc[FC_SIZE] = "RX/TX";
+
+	if (!isup) {
+		netdev_info(vsi->netdev, "NIC Link is Down\n");
+		return;
+	}
+
+	switch (vsi->back->hw.phy.link_info.link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		strncpy(speed, "40 Gbps", SPEED_SIZE);
+		break;
+	case I40E_LINK_SPEED_10GB:
+		strncpy(speed, "10 Gbps", SPEED_SIZE);
+		break;
+	case I40E_LINK_SPEED_1GB:
+		strncpy(speed, "1000 Mbps", SPEED_SIZE);
+		break;
+	default:
+		break;
+	}
+
+	switch (vsi->back->hw.fc.current_mode) {
+	case I40E_FC_FULL:
+		strncpy(fc, "RX/TX", FC_SIZE);
+		break;
+	case I40E_FC_TX_PAUSE:
+		strncpy(fc, "TX", FC_SIZE);
+		break;
+	case I40E_FC_RX_PAUSE:
+		strncpy(fc, "RX", FC_SIZE);
+		break;
+	default:
+		strncpy(fc, "None", FC_SIZE);
+		break;
+	}
+
+	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+		    speed, fc);
+}
 
 /**
  * i40e_up_complete - Finish the last steps of bringing up a connection
@@ -4099,11 +4249,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 
 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
 	    (vsi->netdev)) {
-		netdev_info(vsi->netdev, "NIC Link is Up\n");
+		i40e_print_link_message(vsi, true);
 		netif_tx_start_all_queues(vsi->netdev);
 		netif_carrier_on(vsi->netdev);
 	} else if (vsi->netdev) {
-		netdev_info(vsi->netdev, "NIC Link is Down\n");
+		i40e_print_link_message(vsi, false);
 	}
 
 	/* replay FDIR SB filters */
@@ -4309,24 +4459,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
 	if (err)
 		goto err_setup_rx;
 
-	if (!vsi->netdev) {
-		err = EINVAL;
-		goto err_setup_rx;
-	}
-	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
-		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
-	err = i40e_vsi_request_irq(vsi, int_name);
-	if (err)
-		goto err_setup_rx;
+	if (vsi->netdev) {
+		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+		err = i40e_vsi_request_irq(vsi, int_name);
+		if (err)
+			goto err_setup_rx;
 
-	/* Notify the stack of the actual queue counts. */
-	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
-	if (err)
-		goto err_set_queues;
+		/* Notify the stack of the actual queue counts. */
+		err = netif_set_real_num_tx_queues(vsi->netdev,
+						   vsi->num_queue_pairs);
+		if (err)
+			goto err_set_queues;
 
-	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
-	if (err)
-		goto err_set_queues;
+		err = netif_set_real_num_rx_queues(vsi->netdev,
+						   vsi->num_queue_pairs);
+		if (err)
+			goto err_set_queues;
+
+	} else if (vsi->type == I40E_VSI_FDIR) {
+		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+			 dev_driver_string(&pf->pdev->dev));
+		err = i40e_vsi_request_irq(vsi, int_name);
+	} else {
+		err = -EINVAL;
+		goto err_setup_rx;
+	}
 
 	err = i40e_up_complete(vsi);
 	if (err)
@@ -4383,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 
-	if (test_and_set_bit(__I40E_DOWN, &vsi->state))
-		return 0;
-
-	i40e_down(vsi);
-	i40e_vsi_free_irq(vsi);
-
-	i40e_vsi_free_tx_resources(vsi);
-	i40e_vsi_free_rx_resources(vsi);
+	i40e_vsi_close(vsi);
 
 	return 0;
 }
@@ -4410,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 
 	WARN_ON(in_interrupt());
 
+	if (i40e_check_asq_alive(&pf->hw))
+		i40e_vc_notify_reset(pf);
+
 	/* do the biggest reset indicated */
 	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
 
@@ -4475,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 		/* Find the VSI(s) that requested a re-init */
 		dev_info(&pf->pdev->dev,
 			 "VSI reinit requested\n");
-		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		for (v = 0; v < pf->num_alloc_vsi; v++) {
 			struct i40e_vsi *vsi = pf->vsi[v];
 			if (vsi != NULL &&
 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4565,6 +4719,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 	int ret = 0;
 	u8 type;
 
+	/* Not DCB capable or capability disabled */
+	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+		return ret;
+
 	/* Ignore if event is not for Nearest Bridge */
 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
@@ -4606,6 +4764,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 	if (!need_reconfig)
 		goto exit;
 
+	/* Enable DCB tagging only when more than one TC */
+	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+		pf->flags |= I40E_FLAG_DCB_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
 	/* Reconfiguration needed quiesce all VSIs */
 	i40e_pf_quiesce_all_vsi(pf);
 
@@ -4709,8 +4873,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 	    (pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return;
 	fcnt_prog = i40e_get_current_fd_count(pf);
-	fcnt_avail = pf->hw.fdir_shared_filter_count +
-				       pf->fdir_pf_filter_count;
+	fcnt_avail = i40e_get_fd_cnt_all(pf);
 	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -4803,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
 			i40e_veb_link_event(pf->veb[i], link_up);
 
 	/* ... now the local VSIs */
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
 			i40e_vsi_link_event(pf->vsi[i], link_up);
 }
@@ -4821,10 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)
 
 	if (new_link == old_link)
 		return;
-
 	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
-		netdev_info(pf->vsi[pf->lan_vsi]->netdev,
-			    "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+		i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
 
 	/* Notify the base of the switch tree connected to
 	 * the link. Floating VEBs are not notified.
@@ -4862,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
 	 *  for each q_vector
 	 *      force an interrupt
 	 */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		struct i40e_vsi *vsi = pf->vsi[v];
 		int armed = 0;
 
@@ -4912,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
 	/* Update the stats for active netdevs so the network stack
 	 * can look at updated numbers whenever it cares to
 	 */
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i] && pf->vsi[i]->netdev)
 			i40e_update_stats(pf->vsi[i]);
 
@@ -5018,11 +5179,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 	u16 pending, i = 0;
 	i40e_status ret;
 	u16 opcode;
+	u32 oldval;
 	u32 val;
 
 	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
 		return;
 
+	/* check for error indications */
+	val = rd32(&pf->hw, pf->hw.aq.arq.len);
+	oldval = val;
+	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+	}
+	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+	}
+	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+	val = rd32(&pf->hw, pf->hw.aq.asq.len);
+	oldval = val;
+	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+	}
+	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+	}
+	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
 	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
 	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
 	if (!event.msg_buf)
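The new ARQ/ASQ checks above follow a read, test, clear, write-back-if-changed pattern, so the length register is rewritten at most once per pass. Stripped to a skeleton (the register and mask names below are placeholders, not real i40e symbols):

	val = rd32(&pf->hw, reg);	/* reg stands in for aq.arq.len / aq.asq.len */
	oldval = val;
	if (val & SOME_ERR_MASK) {
		dev_info(&pf->pdev->dev, "queue error latched\n");
		val &= ~SOME_ERR_MASK;	/* clear only the handled flag */
	}
	if (oldval != val)
		wr32(&pf->hw, reg, val);	/* single conditional write-back */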
@@ -5128,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
 	int ret;
 
 	/* build VSI that owns this VEB, temporarily attached to base VEB */
-	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
 		if (pf->vsi[v] &&
 		    pf->vsi[v]->veb_idx == veb->idx &&
 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5158,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
 		goto end_reconstitute;
 
 	/* create the remaining VSIs attached to this VEB */
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
 			continue;
 
@@ -5226,9 +5423,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
 		}
 	} while (err);
 
-	/* increment MSI-X count because current FW skips one */
-	pf->hw.func_caps.num_msix_vectors++;
-
 	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
 	    (pf->hw.aq.fw_maj_ver < 2)) {
 		pf->hw.func_caps.num_msix_vectors++;
@@ -5267,15 +5461,14 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
 {
 	struct i40e_vsi *vsi;
-	bool new_vsi = false;
-	int err, i;
+	int i;
 
 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return;
 
 	/* find existing VSI and see if it needs configuring */
 	vsi = NULL;
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
 			vsi = pf->vsi[i];
 			break;
@@ -5288,47 +5481,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
 				     pf->vsi[pf->lan_vsi]->seid, 0);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
-			goto err_vsi;
+			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+			return;
 		}
-		new_vsi = true;
 	}
-	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
 
-	err = i40e_vsi_setup_tx_resources(vsi);
-	if (err)
-		goto err_setup_tx;
-	err = i40e_vsi_setup_rx_resources(vsi);
-	if (err)
-		goto err_setup_rx;
-
-	if (new_vsi) {
-		char int_name[IFNAMSIZ + 9];
-		err = i40e_vsi_configure(vsi);
-		if (err)
-			goto err_setup_rx;
-		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
-			 dev_driver_string(&pf->pdev->dev));
-		err = i40e_vsi_request_irq(vsi, int_name);
-		if (err)
-			goto err_setup_rx;
-		err = i40e_up_complete(vsi);
-		if (err)
-			goto err_up_complete;
-		clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
-	}
-
-	return;
-
-err_up_complete:
-	i40e_down(vsi);
-	i40e_vsi_free_irq(vsi);
-err_setup_rx:
-	i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
-	i40e_vsi_free_tx_resources(vsi);
-err_vsi:
-	pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-	i40e_vsi_clear(vsi);
+	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
 }
 
 /**
@@ -5340,7 +5498,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 	int i;
 
 	i40e_fdir_filter_exit(pf);
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
 			i40e_vsi_release(pf->vsi[i]);
 			break;
@@ -5357,7 +5515,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 static int i40e_prep_for_reset(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status ret;
+	i40e_status ret = 0;
 	u32 v;
 
 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
@@ -5366,13 +5524,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
 
 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
-	if (i40e_check_asq_alive(hw))
-		i40e_vc_notify_reset(pf);
-
 	/* quiesce the VSIs and their queues that are not already DOWN */
 	i40e_pf_quiesce_all_vsi(pf);
 
-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v])
 			pf->vsi[v]->seid = 0;
 	}
@@ -5380,22 +5535,40 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
 	i40e_shutdown_adminq(&pf->hw);
 
 	/* call shutdown HMC */
-	ret = i40e_shutdown_lan_hmc(hw);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
-		clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+	if (hw->hmc.hmc_obj) {
+		ret = i40e_shutdown_lan_hmc(hw);
+		if (ret) {
+			dev_warn(&pf->pdev->dev,
+				 "shutdown_lan_hmc failed: %d\n", ret);
+			clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+		}
 	}
 	return ret;
 }
 
 /**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+	struct i40e_driver_version dv;
+
+	dv.major_version = DRV_VERSION_MAJOR;
+	dv.minor_version = DRV_VERSION_MINOR;
+	dv.build_version = DRV_VERSION_BUILD;
+	dv.subbuild_version = 0;
+	strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
  * i40e_reset_and_rebuild - reset and rebuild using a saved config
  * @pf: board private structure
  * @reinit: if the Main VSI needs to re-initialized.
  **/
 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 {
-	struct i40e_driver_version dv;
 	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret;
 	u32 v;
@@ -5405,8 +5578,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 	 * because the reset will make them disappear.
 	 */
 	ret = i40e_pf_reset(hw);
-	if (ret)
+	if (ret) {
 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+		goto end_core_reset;
+	}
 	pf->pfr_count++;
 
 	if (test_bit(__I40E_DOWN, &pf->state))
@@ -5426,6 +5601,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 		i40e_verify_eeprom(pf);
 	}
 
+	i40e_clear_pxe_mode(hw);
 	ret = i40e_get_capabilities(pf);
 	if (ret) {
 		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5526,13 +5702,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 	}
 
 	/* tell the firmware that we're starting */
-	dv.major_version = DRV_VERSION_MAJOR;
-	dv.minor_version = DRV_VERSION_MINOR;
-	dv.build_version = DRV_VERSION_BUILD;
-	dv.subbuild_version = 0;
-	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
-
-	dev_info(&pf->pdev->dev, "reset complete\n");
+	i40e_send_version(pf);
 
 end_core_reset:
 	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5642,7 +5812,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
  **/
 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 {
-	const int vxlan_hdr_qwords = 4;
 	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret;
 	u8 filter_index;
@@ -5660,7 +5829,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 			port = pf->vxlan_ports[i];
 			ret = port ?
 			      i40e_aq_add_udp_tunnel(hw, ntohs(port),
-						     vxlan_hdr_qwords,
 						     I40E_AQC_TUNNEL_TYPE_VXLAN,
 						     &filter_index, NULL)
 			      : i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -5839,15 +6007,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	 * find next empty vsi slot, looping back around if necessary
 	 */
 	i = pf->next_vsi;
-	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+	while (i < pf->num_alloc_vsi && pf->vsi[i])
 		i++;
-	if (i >= pf->hw.func_caps.num_vsis) {
+	if (i >= pf->num_alloc_vsi) {
 		i = 0;
 		while (i < pf->next_vsi && pf->vsi[i])
 			i++;
 	}
 
-	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
 		vsi_idx = i;             /* Found one! */
 	} else {
 		ret = -ENODEV;
@@ -5870,6 +6038,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	vsi->netdev_registered = false;
 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
 	INIT_LIST_HEAD(&vsi->mac_filter_list);
+	vsi->irqs_ready = false;
 
 	ret = i40e_set_num_rings_in_vsi(vsi);
 	if (ret)
@@ -5987,14 +6156,12 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
  **/
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
+	struct i40e_ring *tx_ring, *rx_ring;
 	struct i40e_pf *pf = vsi->back;
 	int i;
 
 	/* Set basic values in the rings to be used later during open() */
 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-		struct i40e_ring *tx_ring;
-		struct i40e_ring *rx_ring;
-
 		/* allocate space for both Tx and Rx in one shot */
 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
 		if (!tx_ring)
@@ -6052,8 +6219,6 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
 		vectors = 0;
 	}
 
-	pf->num_msix_entries = vectors;
-
 	return vectors;
 }
 
@@ -6107,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
 	for (i = 0; i < v_budget; i++)
 		pf->msix_entries[i].entry = i;
 	vec = i40e_reserve_msix_vectors(pf, v_budget);
+
+	if (vec != v_budget) {
+		/* If we have limited resources, we will start with no vectors
+		 * for the special features and then allocate vectors to some
+		 * of these features based on the policy and at the end disable
+		 * the features that did not get any vectors.
+		 */
+		pf->num_vmdq_msix = 0;
+	}
+
 	if (vec < I40E_MIN_MSIX) {
 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
 		kfree(pf->msix_entries);
@@ -6115,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
 
 	} else if (vec == I40E_MIN_MSIX) {
 		/* Adjust for minimal MSIX use */
-		dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
-		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
 		pf->num_vmdq_vsis = 0;
 		pf->num_vmdq_qps = 0;
-		pf->num_vmdq_msix = 0;
 		pf->num_lan_qps = 1;
 		pf->num_lan_msix = 1;
 
 	} else if (vec != v_budget) {
+		/* reserve the misc vector */
+		vec--;
+
 		/* Scale vector usage down */
 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
-		vec--;           /* reserve the misc vector */
+		pf->num_vmdq_vsis = 1;
 
 		/* partition out the remaining vectors */
 		switch (vec) {
 		case 2:
-			pf->num_vmdq_vsis = 1;
 			pf->num_lan_msix = 1;
 			break;
 		case 3:
-			pf->num_vmdq_vsis = 1;
 			pf->num_lan_msix = 2;
 			break;
 		default:
@@ -6147,6 +6320,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
 		}
 	}
 
+	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+	    (pf->num_vmdq_msix == 0)) {
+		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+	}
 	return err;
 }
 
@@ -6171,7 +6349,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
 	if (vsi->netdev)
 		netif_napi_add(vsi->netdev, &q_vector->napi,
-			       i40e_napi_poll, vsi->work_limit);
+			       i40e_napi_poll, NAPI_POLL_WEIGHT);
 
 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -6231,7 +6409,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	if (err) {
 		pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
 			       I40E_FLAG_RSS_ENABLED	|
-			       I40E_FLAG_DCB_ENABLED	|
+			       I40E_FLAG_DCB_CAPABLE	|
 			       I40E_FLAG_SRIOV_ENABLED	|
 			       I40E_FLAG_FD_SB_ENABLED	|
 			       I40E_FLAG_FD_ATR_ENABLED	|
@@ -6364,7 +6542,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 		return 0;
 
 	queue_count = min_t(int, queue_count, pf->rss_size_max);
-	queue_count = rounddown_pow_of_two(queue_count);
 
 	if (queue_count != pf->rss_size) {
 		i40e_prep_for_reset(pf);
@@ -6407,6 +6584,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		    I40E_FLAG_MSIX_ENABLED    |
 		    I40E_FLAG_RX_1BUF_ENABLED;
 
+	/* Set default ITR */
+	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
 	/* Depending on PF configurations, it is possible that the RSS
 	 * maximum might end up larger than the available queues
 	 */
@@ -6416,7 +6597,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	if (pf->hw.func_caps.rss) {
 		pf->flags |= I40E_FLAG_RSS_ENABLED;
 		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
-		pf->rss_size = rounddown_pow_of_two(pf->rss_size);
 	} else {
 		pf->rss_size = 1;
 	}
@@ -6432,8 +6612,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+		/* Setup a counter for fd_atr per pf */
+		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
 		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+			/* Setup a counter for fd_sb per pf */
+			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 		} else {
 			dev_info(&pf->pdev->dev,
 				 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -6649,6 +6833,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 }
 
 #endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev,
+			    const unsigned char *addr,
+			    u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+			    struct net_device *dev,
+			    unsigned char *addr,
+			    u16 flags)
+#endif
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+	int err = 0;
+
+	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
+
+	/* Hardware does not support aging addresses so if a
+	 * ndm_state is given only allow permanent addresses
+	 */
+	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+		netdev_info(dev, "FDB only supports static addresses\n");
+		return -EINVAL;
+	}
+
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_add_excl(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_add_excl(dev, addr);
+	else
+		err = -EINVAL;
+
+	/* Only return duplicate errors if NLM_F_EXCL is set */
+	if (err == -EEXIST && !(flags & NLM_F_EXCL))
+		err = 0;
+
+	return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+			    struct net_device *dev,
+			    const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+			    struct net_device *dev,
+			    unsigned char *addr)
+#endif
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+	int err = -EOPNOTSUPP;
+
+	if (ndm->ndm_state & NUD_PERMANENT) {
+		netdev_info(dev, "FDB only supports static addresses\n");
+		return -EINVAL;
+	}
+
+	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+		if (is_unicast_ether_addr(addr))
+			err = dev_uc_del(dev, addr);
+		else if (is_multicast_ether_addr(addr))
+			err = dev_mc_del(dev, addr);
+		else
+			err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+			     struct netlink_callback *cb,
+			     struct net_device *dev,
+			     int idx)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+	return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
 static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_open		= i40e_open,
 	.ndo_stop		= i40e_close,
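The FDB hooks added above are active only when SR-IOV is enabled, and i40e_ndo_fdb_add() accepts only static entries because the hardware cannot age addresses out. From userspace this is the path exercised by an iproute2 request along the lines of `bridge fdb add <MAC> dev <netdev>` (the MAC and device name being whatever the administrator chooses); a request carrying a non-permanent ndm_state is refused with -EINVAL and the "FDB only supports static addresses" message.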
@@ -6669,13 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_set_features	= i40e_set_features,
 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
-	.ndo_set_vf_tx_rate	= i40e_ndo_set_vf_bw,
+	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
+	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofck,
 #ifdef CONFIG_I40E_VXLAN
 	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
 	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
 #endif
+#ifdef HAVE_FDB_OPS
+	.ndo_fdb_add		= i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+	.ndo_fdb_del		= i40e_ndo_fdb_del,
+	.ndo_fdb_dump		= i40e_ndo_fdb_dump,
+#endif
+#endif
 };
 
 /**
@@ -6720,16 +7002,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6720 NETIF_F_TSO_ECN | 7002 NETIF_F_TSO_ECN |
6721 NETIF_F_TSO6 | 7003 NETIF_F_TSO6 |
6722 NETIF_F_RXCSUM | 7004 NETIF_F_RXCSUM |
6723 NETIF_F_NTUPLE |
6724 NETIF_F_RXHASH | 7005 NETIF_F_RXHASH |
6725 0; 7006 0;
6726 7007
7008 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
7009 netdev->features |= NETIF_F_NTUPLE;
7010
6727 /* copy netdev features into list of user selectable features */ 7011 /* copy netdev features into list of user selectable features */
6728 netdev->hw_features |= netdev->features; 7012 netdev->hw_features |= netdev->features;
6729 7013
6730 if (vsi->type == I40E_VSI_MAIN) { 7014 if (vsi->type == I40E_VSI_MAIN) {
6731 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 7015 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
6732 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
 7016 ether_addr_copy(mac_addr, hw->mac.perm_addr);
7017 /* The following two steps are necessary to prevent reception
7018 * of tagged packets - by default the NVM loads a MAC-VLAN
7019 * filter that will accept any tagged packet. This is to
7020 * prevent that during normal operations until a specific
7021 * VLAN tag filter has been set.
7022 */
7023 i40e_rm_default_mac_filter(vsi, mac_addr);
7024 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
6733 } else { 7025 } else {
6734 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 7026 /* relate the VSI_VMDQ name to the VSI_MAIN name */
6735 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 7027 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -6739,8 +7031,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6739 } 7031 }
6740 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); 7032 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
6741 7033
6742 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
6743 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
 7034 ether_addr_copy(netdev->dev_addr, mac_addr);
 7035 ether_addr_copy(netdev->perm_addr, mac_addr);
6744 /* vlan gets same features (except vlan offload) 7036 /* vlan gets same features (except vlan offload)
6745 * after any tweaks for specific VSI types 7037 * after any tweaks for specific VSI types
6746 */ 7038 */
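The memcpy-to-ether_addr_copy conversions in this patch are behavior-preserving: the helper copies exactly six bytes, but the kernel may implement it as aligned half-word stores, which is why it expects 2-byte-aligned buffers. A trivial portable model, for illustration only:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Portable model: equivalent to a 6-byte copy; the kernel version
     * may use aligned 16/32-bit stores, hence the alignment rule. */
    static void ether_addr_copy_model(uint8_t *dst, const uint8_t *src)
    {
        memcpy(dst, src, ETH_ALEN);
    }

    int main(void)
    {
        uint8_t from[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        uint8_t to[ETH_ALEN];

        ether_addr_copy_model(to, from);
        printf("%02x:%02x\n", to[0], to[5]); /* prints 02:01 */
        return 0;
    }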
@@ -6772,7 +7064,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
6772 return; 7064 return;
6773 7065
6774 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); 7066 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
6775 return;
6776} 7067}
6777 7068
6778/** 7069/**
@@ -6898,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
6898 7189
6899 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 7190 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
6900 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 7191 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
7192 if (pf->vf[vsi->vf_id].spoofchk) {
7193 ctxt.info.valid_sections |=
7194 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
7195 ctxt.info.sec_flags |=
7196 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
7197 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
7198 }
6901 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7199 /* Setup the VSI tx/rx queue map for TC0 only for now */
6902 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7200 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6903 break; 7201 break;
@@ -6982,11 +7280,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
6982 unregister_netdev(vsi->netdev); 7280 unregister_netdev(vsi->netdev);
6983 } 7281 }
6984 } else { 7282 } else {
6985 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
6986 i40e_down(vsi);
6987 i40e_vsi_free_irq(vsi);
6988 i40e_vsi_free_tx_resources(vsi);
6989 i40e_vsi_free_rx_resources(vsi);
 7283 i40e_vsi_close(vsi);
6990 } 7284 }
6991 i40e_vsi_disable_irq(vsi); 7285 i40e_vsi_disable_irq(vsi);
6992 } 7286 }
@@ -7013,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
7013 * the orphan VEBs yet. We'll wait for an explicit remove request 7307 * the orphan VEBs yet. We'll wait for an explicit remove request
7014 * from up the network stack. 7308 * from up the network stack.
7015 */ 7309 */
7016 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
 7310 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
7017 if (pf->vsi[i] && 7311 if (pf->vsi[i] &&
7018 pf->vsi[i]->uplink_seid == uplink_seid && 7312 pf->vsi[i]->uplink_seid == uplink_seid &&
7019 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 7313 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7192,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
7192 7486
7193 if (!veb && uplink_seid != pf->mac_seid) { 7487 if (!veb && uplink_seid != pf->mac_seid) {
7194 7488
7195 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
 7489 for (i = 0; i < pf->num_alloc_vsi; i++) {
7196 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 7490 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
7197 vsi = pf->vsi[i]; 7491 vsi = pf->vsi[i];
7198 break; 7492 break;
@@ -7435,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
7435 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 7729 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
7436 * the VEB itself, so don't use (*branch) after this loop. 7730 * the VEB itself, so don't use (*branch) after this loop.
7437 */ 7731 */
7438 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
 7732 for (i = 0; i < pf->num_alloc_vsi; i++) {
7439 if (!pf->vsi[i]) 7733 if (!pf->vsi[i])
7440 continue; 7734 continue;
7441 if (pf->vsi[i]->uplink_seid == branch_seid && 7735 if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7487,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)
7487 pf = veb->pf; 7781 pf = veb->pf;
7488 7782
7489 /* find the remaining VSI and check for extras */ 7783 /* find the remaining VSI and check for extras */
7490 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
 7784 for (i = 0; i < pf->num_alloc_vsi; i++) {
7491 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 7785 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
7492 n++; 7786 n++;
7493 vsi = pf->vsi[i]; 7787 vsi = pf->vsi[i];
@@ -7516,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)
7516 7810
7517 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 7811 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
7518 i40e_veb_clear(veb); 7812 i40e_veb_clear(veb);
7519
7520 return;
7521} 7813}
7522 7814
7523/** 7815/**
@@ -7601,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7601 } 7893 }
7602 7894
7603 /* make sure there is such a vsi and uplink */ 7895 /* make sure there is such a vsi and uplink */
7604 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
 7896 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
7605 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 7897 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
7606 break; 7898 break;
7607 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
 7899 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
7608 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 7900 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
7609 vsi_seid); 7901 vsi_seid);
7610 return NULL; 7902 return NULL;
@@ -7639,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7639 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 7931 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
7640 if (ret) 7932 if (ret)
7641 goto err_veb; 7933 goto err_veb;
7934 if (vsi_idx == pf->lan_vsi)
7935 pf->lan_veb = veb->idx;
7642 7936
7643 return veb; 7937 return veb;
7644 7938
@@ -7774,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
7774 "header: %d reported %d total\n", 8068 "header: %d reported %d total\n",
7775 num_reported, num_total); 8069 num_reported, num_total);
7776 8070
7777 if (num_reported) {
7778 int sz = sizeof(*sw_config) * num_reported;
7779
7780 kfree(pf->sw_config);
7781 pf->sw_config = kzalloc(sz, GFP_KERNEL);
7782 if (pf->sw_config)
7783 memcpy(pf->sw_config, sw_config, sz);
7784 }
7785
7786 for (i = 0; i < num_reported; i++) { 8071 for (i = 0; i < num_reported; i++) {
7787 struct i40e_aqc_switch_config_element_resp *ele = 8072 struct i40e_aqc_switch_config_element_resp *ele =
7788 &sw_config->element[i]; 8073 &sw_config->element[i];
@@ -7949,9 +8234,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
7949 queues_left = pf->hw.func_caps.num_tx_qp; 8234 queues_left = pf->hw.func_caps.num_tx_qp;
7950 8235
7951 if ((queues_left == 1) || 8236 if ((queues_left == 1) ||
7952 !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
7953 !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
7954 I40E_FLAG_DCB_ENABLED))) {
 8237 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7955 /* one qp for PF, no queues for anything else */ 8238 /* one qp for PF, no queues for anything else */
7956 queues_left = 0; 8239 queues_left = 0;
7957 pf->rss_size = pf->num_lan_qps = 1; 8240 pf->rss_size = pf->num_lan_qps = 1;
@@ -7960,14 +8243,27 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
7960 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8243 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
7961 I40E_FLAG_FD_SB_ENABLED | 8244 I40E_FLAG_FD_SB_ENABLED |
7962 I40E_FLAG_FD_ATR_ENABLED | 8245 I40E_FLAG_FD_ATR_ENABLED |
7963 I40E_FLAG_DCB_ENABLED |
 8246 I40E_FLAG_DCB_CAPABLE |
7964 I40E_FLAG_SRIOV_ENABLED | 8247 I40E_FLAG_SRIOV_ENABLED |
7965 I40E_FLAG_VMDQ_ENABLED); 8248 I40E_FLAG_VMDQ_ENABLED);
8249 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8250 I40E_FLAG_FD_SB_ENABLED |
8251 I40E_FLAG_FD_ATR_ENABLED |
8252 I40E_FLAG_DCB_CAPABLE))) {
8253 /* one qp for PF */
8254 pf->rss_size = pf->num_lan_qps = 1;
8255 queues_left -= pf->num_lan_qps;
8256
8257 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8258 I40E_FLAG_FD_SB_ENABLED |
8259 I40E_FLAG_FD_ATR_ENABLED |
8260 I40E_FLAG_DCB_ENABLED |
8261 I40E_FLAG_VMDQ_ENABLED);
7966 } else { 8262 } else {
7967 /* Not enough queues for all TCs */ 8263 /* Not enough queues for all TCs */
7968 if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
 8264 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
7969 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 8265 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
7970 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
 8266 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
7971 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 8267 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
7972 } 8268 }
7973 pf->num_lan_qps = pf->rss_size_max; 8269 pf->num_lan_qps = pf->rss_size_max;
@@ -7998,7 +8294,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
7998 } 8294 }
7999 8295
8000 pf->queues_left = queues_left; 8296 pf->queues_left = queues_left;
8001 return;
8002} 8297}
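The reworked branch above now distinguishes "no MSI-X at all" from "MSI-X available, but no feature wants extra queues"; both collapse to a single queue pair for the PF. A toy model of the budgeting, with made-up numbers:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        int queues_left = 64;   /* hypothetical num_tx_qp */
        bool msix = true;
        bool rss = false, fd_sb = false, fd_atr = false, dcb = false;
        int lan_qps;

        if (queues_left == 1 || !msix) {
            lan_qps = 1;        /* PF only; advanced features all off */
        } else if (!(rss || fd_sb || fd_atr || dcb)) {
            lan_qps = 1;        /* MSI-X, but nothing needs more qps */
        } else {
            lan_qps = 16;       /* hypothetical rss_size_max */
        }
        queues_left -= lan_qps;
        printf("LAN qps %d, remaining %d\n", lan_qps, queues_left);
        return 0;
    }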
8003 8298
8004/** 8299/**
@@ -8055,12 +8350,13 @@ static void i40e_print_features(struct i40e_pf *pf)
8055 8350
8056 if (pf->flags & I40E_FLAG_RSS_ENABLED) 8351 if (pf->flags & I40E_FLAG_RSS_ENABLED)
8057 buf += sprintf(buf, "RSS "); 8352 buf += sprintf(buf, "RSS ");
8058 buf += sprintf(buf, "FDir ");
8059 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8060 buf += sprintf(buf, "ATR ");
8061 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
8062 buf += sprintf(buf, "NTUPLE ");
8063 if (pf->flags & I40E_FLAG_DCB_ENABLED)
 8353 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
 8354 buf += sprintf(buf, "FD_ATR ");
 8355 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
 8356 buf += sprintf(buf, "FD_SB ");
 8357 buf += sprintf(buf, "NTUPLE ");
 8358 }
 8359 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
8064 buf += sprintf(buf, "DCB "); 8360 buf += sprintf(buf, "DCB ");
8065 if (pf->flags & I40E_FLAG_PTP) 8361 if (pf->flags & I40E_FLAG_PTP)
8066 buf += sprintf(buf, "PTP "); 8362 buf += sprintf(buf, "PTP ");
@@ -8083,13 +8379,13 @@ static void i40e_print_features(struct i40e_pf *pf)
8083 **/ 8379 **/
8084static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 8380static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8085{ 8381{
8086 struct i40e_driver_version dv;
8087 struct i40e_pf *pf; 8382 struct i40e_pf *pf;
8088 struct i40e_hw *hw; 8383 struct i40e_hw *hw;
8089 static u16 pfs_found; 8384 static u16 pfs_found;
8090 u16 link_status; 8385 u16 link_status;
8091 int err = 0; 8386 int err = 0;
8092 u32 len; 8387 u32 len;
8388 u32 i;
8093 8389
8094 err = pci_enable_device_mem(pdev); 8390 err = pci_enable_device_mem(pdev);
8095 if (err) 8391 if (err)
@@ -8201,6 +8497,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8201 8497
8202 i40e_verify_eeprom(pf); 8498 i40e_verify_eeprom(pf);
8203 8499
8500 /* Rev 0 hardware was never productized */
8501 if (hw->revision_id < 1)
8502 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
8503
8204 i40e_clear_pxe_mode(hw); 8504 i40e_clear_pxe_mode(hw);
8205 err = i40e_get_capabilities(pf); 8505 err = i40e_get_capabilities(pf);
8206 if (err) 8506 if (err)
@@ -8234,7 +8534,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8234 goto err_mac_addr; 8534 goto err_mac_addr;
8235 } 8535 }
8236 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 8536 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
8237 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
 8537 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
8238 8538
8239 pci_set_drvdata(pdev, pf); 8539 pci_set_drvdata(pdev, pf);
8240 pci_save_state(pdev); 8540 pci_save_state(pdev);
@@ -8242,8 +8542,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8242 err = i40e_init_pf_dcb(pf); 8542 err = i40e_init_pf_dcb(pf);
8243 if (err) { 8543 if (err) {
8244 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); 8544 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
8245 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8246 goto err_init_dcb;
 8545 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
 8546 /* Continue without DCB enabled */
8247 } 8547 }
8248#endif /* CONFIG_I40E_DCB */ 8548#endif /* CONFIG_I40E_DCB */
8249 8549
@@ -8264,10 +8564,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8264 i40e_determine_queue_usage(pf); 8564 i40e_determine_queue_usage(pf);
8265 i40e_init_interrupt_scheme(pf); 8565 i40e_init_interrupt_scheme(pf);
8266 8566
8267 /* Set up the *vsi struct based on the number of VSIs in the HW,
8268 * and set up our local tracking of the MAIN PF vsi.
8269 */
8270 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
 8567 /* The number of VSIs reported by the FW is the minimum guaranteed
 8568 * to us; HW supports far more and we share the remaining pool with
 8569 * the other PFs. We allocate space for more than the guarantee with
 8570 * the understanding that we might not get them all later.
 8571 */
 8572 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
8573 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
8574 else
8575 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
8576
8577 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
8578 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
8271 pf->vsi = kzalloc(len, GFP_KERNEL); 8579 pf->vsi = kzalloc(len, GFP_KERNEL);
8272 if (!pf->vsi) { 8580 if (!pf->vsi) {
8273 err = -ENOMEM; 8581 err = -ENOMEM;
@@ -8279,6 +8587,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8279 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 8587 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
8280 goto err_vsis; 8588 goto err_vsis;
8281 } 8589 }
8590 /* if FDIR VSI was set up, start it now */
8591 for (i = 0; i < pf->num_alloc_vsi; i++) {
8592 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
8593 i40e_vsi_open(pf->vsi[i]);
8594 break;
8595 }
8596 }
8282 8597
8283 /* The main driver is (mostly) up and happy. We need to set this state 8598 /* The main driver is (mostly) up and happy. We need to set this state
8284 * before setting up the misc vector or we get a race and the vector 8599 * before setting up the misc vector or we get a race and the vector
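The probe path above now sizes the VSI array as max(firmware guarantee, I40E_MIN_VSI_ALLOC) instead of trusting func_caps.num_vsis alone, and every former num_vsis loop bound in this patch becomes num_alloc_vsi to match. A sketch of the clamp (the floor value used here is illustrative, not taken from the header):

    #include <stdio.h>

    #define I40E_MIN_VSI_ALLOC 51   /* assumed floor; see i40e.h */

    int main(void)
    {
        unsigned int fw_guaranteed = 34;    /* hypothetical FW report */
        unsigned int num_alloc_vsi =
            fw_guaranteed < I40E_MIN_VSI_ALLOC ? I40E_MIN_VSI_ALLOC
                                               : fw_guaranteed;

        printf("allocating %u VSI slots\n", num_alloc_vsi);
        return 0;
    }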
@@ -8300,6 +8615,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8300 } 8615 }
8301 } 8616 }
8302 8617
8618#ifdef CONFIG_PCI_IOV
8303 /* prep for VF support */ 8619 /* prep for VF support */
8304 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 8620 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8305 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 8621 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8322,17 +8638,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8322 err); 8638 err);
8323 } 8639 }
8324 } 8640 }
8641#endif /* CONFIG_PCI_IOV */
8325 8642
8326 pfs_found++; 8643 pfs_found++;
8327 8644
8328 i40e_dbg_pf_init(pf); 8645 i40e_dbg_pf_init(pf);
8329 8646
8330 /* tell the firmware that we're starting */ 8647 /* tell the firmware that we're starting */
8331 dv.major_version = DRV_VERSION_MAJOR;
8332 dv.minor_version = DRV_VERSION_MINOR;
8333 dv.build_version = DRV_VERSION_BUILD;
8334 dv.subbuild_version = 0;
8335 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
 8648 i40e_send_version(pf);
8336 8649
8337 /* since everything's happy, start the service_task timer */ 8650 /* since everything's happy, start the service_task timer */
8338 mod_timer(&pf->service_timer, 8651 mod_timer(&pf->service_timer,
@@ -8373,9 +8686,6 @@ err_vsis:
8373err_switch_setup: 8686err_switch_setup:
8374 i40e_reset_interrupt_capability(pf); 8687 i40e_reset_interrupt_capability(pf);
8375 del_timer_sync(&pf->service_timer); 8688 del_timer_sync(&pf->service_timer);
8376#ifdef CONFIG_I40E_DCB
8377err_init_dcb:
8378#endif /* CONFIG_I40E_DCB */
8379err_mac_addr: 8689err_mac_addr:
8380err_configure_lan_hmc: 8690err_configure_lan_hmc:
8381 (void)i40e_shutdown_lan_hmc(hw); 8691 (void)i40e_shutdown_lan_hmc(hw);
@@ -8456,10 +8766,13 @@ static void i40e_remove(struct pci_dev *pdev)
8456 } 8766 }
8457 8767
8458 /* shutdown and destroy the HMC */ 8768 /* shutdown and destroy the HMC */
8459 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
8460 if (ret_code)
8461 dev_warn(&pdev->dev,
8462 "Failed to destroy the HMC resources: %d\n", ret_code);
 8769 if (pf->hw.hmc.hmc_obj) {
 8770 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
 8771 if (ret_code)
 8772 dev_warn(&pdev->dev,
 8773 "Failed to destroy the HMC resources: %d\n",
 8774 ret_code);
 8775 }
8463 8776
8464 /* shutdown the adminq */ 8777 /* shutdown the adminq */
8465 ret_code = i40e_shutdown_adminq(&pf->hw); 8778 ret_code = i40e_shutdown_adminq(&pf->hw);
@@ -8470,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)
8470 8783
8471 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 8784 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
8472 i40e_clear_interrupt_scheme(pf); 8785 i40e_clear_interrupt_scheme(pf);
8473 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
 8786 for (i = 0; i < pf->num_alloc_vsi; i++) {
8474 if (pf->vsi[i]) { 8787 if (pf->vsi[i]) {
8475 i40e_vsi_clear_rings(pf->vsi[i]); 8788 i40e_vsi_clear_rings(pf->vsi[i]);
8476 i40e_vsi_clear(pf->vsi[i]); 8789 i40e_vsi_clear(pf->vsi[i]);
@@ -8485,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)
8485 8798
8486 kfree(pf->qp_pile); 8799 kfree(pf->qp_pile);
8487 kfree(pf->irq_pile); 8800 kfree(pf->irq_pile);
8488 kfree(pf->sw_config);
8489 kfree(pf->vsi); 8801 kfree(pf->vsi);
8490 8802
8491 /* force a PF reset to clean anything leftover */ 8803 /* force a PF reset to clean anything leftover */
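The remove path now guards the HMC shutdown on hw.hmc.hmc_obj so that a probe that failed before HMC init does not warn about tearing down state that was never built. The general only-unwind-what-you-set-up pattern, as a standalone sketch (hypothetical context struct, not driver code):

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { void *hmc_obj; };

    static void teardown(struct ctx *c)
    {
        /* Only unwind state that was actually set up. */
        if (c->hmc_obj) {
            free(c->hmc_obj);
            c->hmc_obj = NULL;
        }
    }

    int main(void)
    {
        struct ctx c = { 0 };  /* probe failed early: nothing to free */
        teardown(&c);          /* safe either way */
        puts("teardown complete");
        return 0;
    }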
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e617959..a430699c41d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -70,10 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
70 u16 *fw_major_version, u16 *fw_minor_version, 70 u16 *fw_major_version, u16 *fw_minor_version,
71 u16 *api_major_version, u16 *api_minor_version, 71 u16 *api_major_version, u16 *api_minor_version,
72 struct i40e_asq_cmd_details *cmd_details); 72 struct i40e_asq_cmd_details *cmd_details);
73i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
 73i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
74 struct i40e_asq_cmd_details *cmd_details); 74 struct i40e_asq_cmd_details *cmd_details);
75i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, 75i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
76 struct i40e_asq_cmd_details *cmd_details); 76 struct i40e_asq_cmd_details *cmd_details);
77i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
78 struct i40e_asq_cmd_details *cmd_details);
77i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 79i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
78 struct i40e_asq_cmd_details *cmd_details); 80 struct i40e_asq_cmd_details *cmd_details);
79i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 81i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -157,8 +159,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
157i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, 159i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
158 struct i40e_asq_cmd_details *cmd_details); 160 struct i40e_asq_cmd_details *cmd_details);
159i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 161i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
160 u16 udp_port, u8 header_len,
161 u8 protocol_index, u8 *filter_index,
 162 u16 udp_port, u8 protocol_index,
 163 u8 *filter_index,
162 struct i40e_asq_cmd_details *cmd_details); 164 struct i40e_asq_cmd_details *cmd_details);
163i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 165i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
164 struct i40e_asq_cmd_details *cmd_details); 166 struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +169,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
167i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, 169i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
168 u16 flags, u8 *mac_addr, 170 u16 flags, u8 *mac_addr,
169 struct i40e_asq_cmd_details *cmd_details); 171 struct i40e_asq_cmd_details *cmd_details);
172i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
173 u16 seid, u16 credit, u8 max_credit,
174 struct i40e_asq_cmd_details *cmd_details);
170i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 175i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
171 struct i40e_asq_cmd_details *cmd_details); 176 struct i40e_asq_cmd_details *cmd_details);
172i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, 177i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
@@ -216,6 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);
216i40e_status i40e_get_mac_addr(struct i40e_hw *hw, 221i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
217 u8 *mac_addr); 222 u8 *mac_addr);
218i40e_status i40e_validate_mac_addr(u8 *mac_addr); 223i40e_status i40e_validate_mac_addr(u8 *mac_addr);
224void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
219/* prototype for functions used for NVM access */ 225/* prototype for functions used for NVM access */
220i40e_status i40e_init_nvm(struct i40e_hw *hw); 226i40e_status i40e_init_nvm(struct i40e_hw *hw);
221i40e_status i40e_acquire_nvm(struct i40e_hw *hw, 227i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e63720800..101f439acda6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48,7 +48,6 @@
48 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) 48 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
49#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ 49#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
50 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) 50 I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
51#define I40E_PTP_TX_TIMEOUT (HZ * 15)
52 51
53/** 52/**
54 * i40e_ptp_read - Read the PHC time from the device 53 * i40e_ptp_read - Read the PHC time from the device
@@ -217,40 +216,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
217} 216}
218 217
219/** 218/**
220 * i40e_ptp_tx_work
221 * @work: pointer to work struct
222 *
223 * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
224 * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
225 * the stack in the skb.
226 */
227static void i40e_ptp_tx_work(struct work_struct *work)
228{
229 struct i40e_pf *pf = container_of(work, struct i40e_pf,
230 ptp_tx_work);
231 struct i40e_hw *hw = &pf->hw;
232 u32 prttsyn_stat_0;
233
234 if (!pf->ptp_tx_skb)
235 return;
236
237 if (time_is_before_jiffies(pf->ptp_tx_start +
238 I40E_PTP_TX_TIMEOUT)) {
239 dev_kfree_skb_any(pf->ptp_tx_skb);
240 pf->ptp_tx_skb = NULL;
241 pf->tx_hwtstamp_timeouts++;
242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
243 return;
244 }
245
246 prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
247 if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
248 i40e_ptp_tx_hwtstamp(pf);
249 else
250 schedule_work(&pf->ptp_tx_work);
251}
252
253/**
254 * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem 219 * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
255 * @ptp: The PTP clock structure 220 * @ptp: The PTP clock structure
256 * @rq: The requested feature to change 221 * @rq: The requested feature to change
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
608 u32 regval; 573 u32 regval;
609 574
610 spin_lock_init(&pf->tmreg_lock); 575 spin_lock_init(&pf->tmreg_lock);
611 INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
612 576
613 dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, 577 dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
614 netdev->name); 578 netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
647 pf->ptp_tx = false; 611 pf->ptp_tx = false;
648 pf->ptp_rx = false; 612 pf->ptp_rx = false;
649 613
650 cancel_work_sync(&pf->ptp_tx_work);
651 if (pf->ptp_tx_skb) { 614 if (pf->ptp_tx_skb) {
652 dev_kfree_skb_any(pf->ptp_tx_skb); 615 dev_kfree_skb_any(pf->ptp_tx_skb);
653 pf->ptp_tx_skb = NULL; 616 pf->ptp_tx_skb = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1d40f425acf1..947de98500f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1340,8 +1340,6 @@
1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT) 1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT) 1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
1343#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
1344#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
1345#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 1343#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
1346#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) 1344#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
1347#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 1345#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1367,8 +1365,6 @@
1367#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT) 1365#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
1368#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 1366#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
1369#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) 1367#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
1370#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
1371#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
1372#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 1368#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
1373#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) 1369#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
1374#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 1370#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1589,6 +1585,14 @@
1589#define I40E_GLLAN_TSOMSK_M 0x000442DC 1585#define I40E_GLLAN_TSOMSK_M 0x000442DC
1590#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 1586#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
1591#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) 1587#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
1588#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
1589#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
1590#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
1591#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
1592#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
1593#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
1594#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
1595
1592#define I40E_PFLAN_QALLOC 0x001C0400 1596#define I40E_PFLAN_QALLOC 0x001C0400
1593#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 1597#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
1594#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) 1598#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..e49f31dbd5d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -24,6 +24,7 @@
24 * 24 *
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27#include <linux/prefetch.h>
27#include "i40e.h" 28#include "i40e.h"
28#include "i40e_prototype.h" 29#include "i40e_prototype.h"
29 30
@@ -61,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
61 62
62 /* find existing FDIR VSI */ 63 /* find existing FDIR VSI */
63 vsi = NULL; 64 vsi = NULL;
64 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
 65 for (i = 0; i < pf->num_alloc_vsi; i++)
65 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) 66 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
66 vsi = pf->vsi[i]; 67 vsi = pf->vsi[i];
67 if (!vsi) 68 if (!vsi)
@@ -120,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
120 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; 121 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
121 dcc |= ((u32)fdir_data->cnt_index << 122 dcc |= ((u32)fdir_data->cnt_index <<
122 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 123 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
123 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 124 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
124 } 125 }
125 126
126 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc); 127 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
183 struct iphdr *ip; 184 struct iphdr *ip;
184 bool err = false; 185 bool err = false;
185 int ret; 186 int ret;
186 int i;
187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, 187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, 188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
199 ip->saddr = fd_data->src_ip[0]; 199 ip->saddr = fd_data->src_ip[0];
200 udp->source = fd_data->src_port; 200 udp->source = fd_data->src_port;
201 201
202 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
203 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
204 fd_data->pctype = i;
205 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
206
207 if (ret) {
208 dev_info(&pf->pdev->dev,
209 "Filter command send failed for PCTYPE %d (ret = %d)\n",
210 fd_data->pctype, ret);
211 err = true;
212 } else {
213 dev_info(&pf->pdev->dev,
214 "Filter OK for PCTYPE %d (ret = %d)\n",
215 fd_data->pctype, ret);
216 }
217 }
 202 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 203 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 204 if (ret) {
 205 dev_info(&pf->pdev->dev,
 206 "Filter command send failed for PCTYPE %d (ret = %d)\n",
 207 fd_data->pctype, ret);
 208 err = true;
 209 } else {
 210 dev_info(&pf->pdev->dev,
 211 "Filter OK for PCTYPE %d (ret = %d)\n",
 212 fd_data->pctype, ret);
 213 }
218 214
219 return err ? -EOPNOTSUPP : 0; 215 return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
262 } 258 }
263 } 259 }
264 260
265 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
 261 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
266 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 262 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
267 263
268 if (ret) { 264 if (ret) {
@@ -455,22 +451,20 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455 451
456 /* filter programming failed most likely due to table full */ 452 /* filter programming failed most likely due to table full */
457 fcnt_prog = i40e_get_current_fd_count(pf); 453 fcnt_prog = i40e_get_current_fd_count(pf);
458 fcnt_avail = pf->hw.fdir_shared_filter_count +
459 pf->fdir_pf_filter_count;
460
 454 fcnt_avail = i40e_get_fd_cnt_all(pf);
461 /* If ATR is running fcnt_prog can quickly change, 455 /* If ATR is running fcnt_prog can quickly change,
462 * if we are very close to full, it makes sense to disable 456 * if we are very close to full, it makes sense to disable
463 * FD ATR/SB and then re-enable it when there is room. 457 * FD ATR/SB and then re-enable it when there is room.
464 */ 458 */
465 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { 459 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
466 /* Turn off ATR first */ 460 /* Turn off ATR first */
467 if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) {
 461 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
468 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 462 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
469 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); 463 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
470 pf->auto_disable_flags |= 464 pf->auto_disable_flags |=
471 I40E_FLAG_FD_ATR_ENABLED; 465 I40E_FLAG_FD_ATR_ENABLED;
472 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 466 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
473 } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) {
 467 } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
474 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 468 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
475 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); 469 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
476 pf->auto_disable_flags |= 470 pf->auto_disable_flags |=
@@ -1199,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1199 u32 rx_error, 1193 u32 rx_error,
1200 u16 rx_ptype) 1194 u16 rx_ptype)
1201{ 1195{
1196 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1197 bool ipv4 = false, ipv6 = false;
1202 bool ipv4_tunnel, ipv6_tunnel; 1198 bool ipv4_tunnel, ipv6_tunnel;
1203 __wsum rx_udp_csum; 1199 __wsum rx_udp_csum;
1204 __sum16 csum;
1205 struct iphdr *iph; 1200 struct iphdr *iph;
1201 __sum16 csum;
1206 1202
1207 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && 1203 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1208 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); 1204 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1213,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1213 skb->ip_summed = CHECKSUM_NONE; 1209 skb->ip_summed = CHECKSUM_NONE;
1214 1210
1215 /* Rx csum enabled and ip headers found? */ 1211 /* Rx csum enabled and ip headers found? */
1216 if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
1217 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 1212 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
 1213 return;
1214
1215 /* did the hardware decode the packet and checksum? */
1216 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1217 return;
1218
1219 /* both known and outer_ip must be set for the below code to work */
1220 if (!(decoded.known && decoded.outer_ip))
1218 return; 1221 return;
1219 1222
1223 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1224 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1225 ipv4 = true;
1226 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1227 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1228 ipv6 = true;
1229
1230 if (ipv4 &&
1231 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1232 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1233 goto checksum_fail;
1234
1220 /* likely incorrect csum if alternate IP extension headers found */ 1235 /* likely incorrect csum if alternate IP extension headers found */
1221 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 1236 if (ipv6 &&
1237 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
1238 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
1239 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1240 /* don't increment checksum err here, non-fatal err */
1222 return; 1241 return;
1223 1242
1224 /* IP or L4 or outmost IP checksum error */
1225 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1226 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
1227 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
1228 vsi->back->hw_csum_rx_error++;
 1243 /* there was some L4 error, count error and punt packet to the stack */
 1244 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
 1245 goto checksum_fail;
 1246
 1247 /* handle packets that were not able to be checksummed due
1248 * to arrival speed, in this case the stack can compute
1249 * the csum.
1250 */
1251 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
1229 return; 1252 return;
1230 }
1231 1253
1254 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1255 * it in the driver, hardware does not do it for us.
1256 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1257 * so the total length of IPv4 header is IHL*4 bytes
 1258 * The UDP_0 bit *may* be set if the *inner* header is UDP
1259 */
1232 if (ipv4_tunnel && 1260 if (ipv4_tunnel &&
1261 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
1233 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 1262 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
1234 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1235 * it in the driver, hardware does not do it for us.
1236 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1237 * so the total length of IPv4 header is IHL*4 bytes
1238 */
1239 skb->transport_header = skb->mac_header + 1263 skb->transport_header = skb->mac_header +
1240 sizeof(struct ethhdr) + 1264 sizeof(struct ethhdr) +
1241 (ip_hdr(skb)->ihl * 4); 1265 (ip_hdr(skb)->ihl * 4);
@@ -1252,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1252 (skb->len - skb_transport_offset(skb)), 1276 (skb->len - skb_transport_offset(skb)),
1253 IPPROTO_UDP, rx_udp_csum); 1277 IPPROTO_UDP, rx_udp_csum);
1254 1278
1255 if (udp_hdr(skb)->check != csum) {
1256 vsi->back->hw_csum_rx_error++;
1257 return;
1258 }
 1279 if (udp_hdr(skb)->check != csum)
 1280 goto checksum_fail;
1259 } 1281 }
1260 1282
1261 skb->ip_summed = CHECKSUM_UNNECESSARY; 1283 skb->ip_summed = CHECKSUM_UNNECESSARY;
1284
1285 return;
1286
1287checksum_fail:
1288 vsi->back->hw_csum_rx_error++;
1262} 1289}
1263 1290
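The rewritten i40e_rx_checksum() above replaces one nested condition with an early-return ladder: non-error bail-outs return with CHECKSUM_NONE, and all genuine failures funnel through the single checksum_fail label that bumps hw_csum_rx_error. A compact standalone model of that control flow (flag names simplified, values hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    enum verdict { CSUM_NONE, CSUM_UNNECESSARY };

    static unsigned long hw_csum_rx_error;

    /* Decision ladder modeled on the rewritten handler: bail out early
     * for "can't know" cases, funnel real errors to one counter. */
    static enum verdict rx_checksum(bool rxcsum_on, bool hw_decoded,
                                    bool ip_err, bool l4_err, bool pprs)
    {
        if (!rxcsum_on || !hw_decoded)
            return CSUM_NONE;     /* nothing to trust, not an error */
        if (ip_err || l4_err)
            goto checksum_fail;
        if (pprs)
            return CSUM_NONE;     /* arrived too fast; stack verifies */
        return CSUM_UNNECESSARY;

    checksum_fail:
        hw_csum_rx_error++;
        return CSUM_NONE;
    }

    int main(void)
    {
        printf("verdict=%d errors=%lu\n",
               rx_checksum(true, true, false, false, false),
               hw_csum_rx_error);
        return 0;
    }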
1264/** 1291/**
@@ -1435,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1435 /* ERR_MASK will only have valid bits if EOP set */ 1462 /* ERR_MASK will only have valid bits if EOP set */
1436 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { 1463 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1437 dev_kfree_skb_any(skb); 1464 dev_kfree_skb_any(skb);
1465 /* TODO: shouldn't we increment a counter indicating the
1466 * drop?
1467 */
1438 goto next_desc; 1468 goto next_desc;
1439 } 1469 }
1440 1470
@@ -1665,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1665 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << 1695 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1666 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; 1696 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1667 1697
1698 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
1699 dtype_cmd |=
1700 ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1701 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
1702
1668 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 1703 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
1669 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 1704 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1670} 1705}
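The ATR hunk above packs the flow-director counter index into quadword 1 of the programming descriptor with the usual shift-and-mask idiom. A self-contained example of the pattern, using a hypothetical 9-bit field at bit 20 (the real field layout lives in the hardware headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical field layout: a 9-bit counter index at bit 20. */
    #define CNTINDEX_SHIFT 20
    #define CNTINDEX_MASK  (0x1FFu << CNTINDEX_SHIFT)

    int main(void)
    {
        uint32_t qw = 0;
        uint32_t cnt_idx = 7;

        /* Shift into place, mask so neighboring fields stay intact. */
        qw |= ((uint32_t)cnt_idx << CNTINDEX_SHIFT) & CNTINDEX_MASK;
        printf("qw=0x%08x field=%u\n", qw,
               (qw & CNTINDEX_MASK) >> CNTINDEX_SHIFT);
        return 0;
    }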
@@ -1825,9 +1860,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1825 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << 1860 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
1826 I40E_TXD_CTX_QW1_CMD_SHIFT; 1861 I40E_TXD_CTX_QW1_CMD_SHIFT;
1827 1862
1828 pf->ptp_tx_start = jiffies;
1829 schedule_work(&pf->ptp_tx_work);
1830
1831 return 1; 1863 return 1;
1832} 1864}
1833 1865
@@ -2179,9 +2211,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2179static int i40e_xmit_descriptor_count(struct sk_buff *skb, 2211static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2180 struct i40e_ring *tx_ring) 2212 struct i40e_ring *tx_ring)
2181{ 2213{
2182#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2183 unsigned int f; 2214 unsigned int f;
2184#endif
2185 int count = 0; 2215 int count = 0;
2186 2216
2187 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 2217 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2190,12 +2220,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2190 * + 1 desc for context descriptor, 2220 * + 1 desc for context descriptor,
2191 * otherwise try next time 2221 * otherwise try next time
2192 */ 2222 */
2193#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2194 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2223 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2195 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2224 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2196#else 2225
2197 count += skb_shinfo(skb)->nr_frags;
2198#endif
2199 count += TXD_USE_COUNT(skb_headlen(skb)); 2226 count += TXD_USE_COUNT(skb_headlen(skb));
2200 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { 2227 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2201 tx_ring->tx_stats.tx_busy++; 2228 tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d5349698e513..0277894fe1c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,7 +27,7 @@
27#ifndef _I40E_TXRX_H_ 27#ifndef _I40E_TXRX_H_
28#define _I40E_TXRX_H_ 28#define _I40E_TXRX_H_
29 29
30/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
 30/* Interrupt Throttling and Rate Limiting Goodies */
31 31
32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ 32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ 33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -69,16 +69,11 @@ enum i40e_dyn_idx_t {
69 69
70/* Supported RSS offloads */ 70/* Supported RSS offloads */
71#define I40E_DEFAULT_RSS_HENA ( \ 71#define I40E_DEFAULT_RSS_HENA ( \
72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
76 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
79 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 76 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
80 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
81 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
82 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
83 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ 78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
84 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 79 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -122,11 +117,11 @@ enum i40e_dyn_idx_t {
122#define i40e_rx_desc i40e_32byte_rx_desc 117#define i40e_rx_desc i40e_32byte_rx_desc
123 118
124#define I40E_MIN_TX_LEN 17 119#define I40E_MIN_TX_LEN 17
125#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
 120#define I40E_MAX_DATA_PER_TXD 8192
126 121
127/* Tx Descriptors needed, worst case */ 122/* Tx Descriptors needed, worst case */
128#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) 123#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
129#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
 124#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
130 125
131#define I40E_TX_FLAGS_CSUM (u32)(1) 126#define I40E_TX_FLAGS_CSUM (u32)(1)
132#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) 127#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
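With the per-descriptor cap lowered to 8192 bytes, TXD_USE_COUNT(S) is simply ceil(S / 8192), so a 64 KB fragment needs 8 descriptors and DESC_NEEDED no longer needs the per-page multiplication. A compilable check of the arithmetic:

    #include <stdio.h>

    #define I40E_MAX_DATA_PER_TXD 8192
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

    int main(void)
    {
        printf("%d\n", TXD_USE_COUNT(65536)); /* 8 descriptors */
        printf("%d\n", TXD_USE_COUNT(100));   /* 1 descriptor  */
        return 0;
    }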
@@ -184,7 +179,6 @@ enum i40e_ring_state_t {
184 __I40E_TX_DETECT_HANG, 179 __I40E_TX_DETECT_HANG,
185 __I40E_HANG_CHECK_ARMED, 180 __I40E_HANG_CHECK_ARMED,
186 __I40E_RX_PS_ENABLED, 181 __I40E_RX_PS_ENABLED,
187 __I40E_RX_LRO_ENABLED,
188 __I40E_RX_16BYTE_DESC_ENABLED, 182 __I40E_RX_16BYTE_DESC_ENABLED,
189}; 183};
190 184
@@ -200,12 +194,6 @@ enum i40e_ring_state_t {
200 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 194 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
201#define clear_check_for_tx_hang(ring) \ 195#define clear_check_for_tx_hang(ring) \
202 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 196 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
203#define ring_is_lro_enabled(ring) \
204 test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
205#define set_ring_lro_enabled(ring) \
206 set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
207#define clear_ring_lro_enabled(ring) \
208 clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
209#define ring_is_16byte_desc_enabled(ring) \ 197#define ring_is_16byte_desc_enabled(ring) \
210 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) 198 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
211#define set_ring_16byte_desc_enabled(ring) \ 199#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968fe557f..9d39ff23c5fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,12 +36,10 @@
36 36
37/* Device IDs */ 37/* Device IDs */
38#define I40E_DEV_ID_SFP_XL710 0x1572 38#define I40E_DEV_ID_SFP_XL710 0x1572
39#define I40E_DEV_ID_SFP_X710 0x1573
40#define I40E_DEV_ID_QEMU 0x1574 39#define I40E_DEV_ID_QEMU 0x1574
41#define I40E_DEV_ID_KX_A 0x157F 40#define I40E_DEV_ID_KX_A 0x157F
42#define I40E_DEV_ID_KX_B 0x1580 41#define I40E_DEV_ID_KX_B 0x1580
43#define I40E_DEV_ID_KX_C 0x1581 42#define I40E_DEV_ID_KX_C 0x1581
44#define I40E_DEV_ID_KX_D 0x1582
45#define I40E_DEV_ID_QSFP_A 0x1583 43#define I40E_DEV_ID_QSFP_A 0x1583
46#define I40E_DEV_ID_QSFP_B 0x1584 44#define I40E_DEV_ID_QSFP_B 0x1584
47#define I40E_DEV_ID_QSFP_C 0x1585 45#define I40E_DEV_ID_QSFP_C 0x1585
@@ -60,8 +58,8 @@
60/* Max default timeout in ms, */ 58/* Max default timeout in ms, */
61#define I40E_MAX_NVM_TIMEOUT 18000 59#define I40E_MAX_NVM_TIMEOUT 18000
62 60
63/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
64#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
 61/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
 62#define I40E_MS_TO_GTIME(time) ((time) * 1000)
65 63
66/* forward declaration */ 64/* forward declaration */
67struct i40e_hw; 65struct i40e_hw;
@@ -167,6 +165,9 @@ struct i40e_link_status {
167 u8 loopback; 165 u8 loopback;
168 /* is Link Status Event notification to SW enabled */ 166 /* is Link Status Event notification to SW enabled */
169 bool lse_enable; 167 bool lse_enable;
168 u16 max_frame_size;
169 bool crc_enable;
170 u8 pacing;
170}; 171};
171 172
172struct i40e_phy_info { 173struct i40e_phy_info {
@@ -409,6 +410,7 @@ struct i40e_driver_version {
409 u8 minor_version; 410 u8 minor_version;
410 u8 build_version; 411 u8 build_version;
411 u8 subbuild_version; 412 u8 subbuild_version;
413 u8 driver_string[32];
412}; 414};
413 415
414/* RX Descriptors */ 416/* RX Descriptors */
@@ -488,9 +490,6 @@ union i40e_32byte_rx_desc {
488 } wb; /* writeback */ 490 } wb; /* writeback */
489}; 491};
490 492
491#define I40E_RXD_QW1_STATUS_SHIFT 0
492#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
493
494enum i40e_rx_desc_status_bits { 493enum i40e_rx_desc_status_bits {
495 /* Note: These are predefined bit offsets */ 494 /* Note: These are predefined bit offsets */
496 I40E_RX_DESC_STATUS_DD_SHIFT = 0, 495 I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -507,9 +506,14 @@ enum i40e_rx_desc_status_bits {
507 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, 506 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
508 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, 507 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
509 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ 508 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
510 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
 509 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
510 I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
511}; 511};
512 512
513#define I40E_RXD_QW1_STATUS_SHIFT 0
514#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
515 << I40E_RXD_QW1_STATUS_SHIFT)
516
513#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT 517#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
514#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ 518#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
515 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) 519 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
@@ -537,7 +541,8 @@ enum i40e_rx_desc_error_bits {
537 I40E_RX_DESC_ERROR_IPE_SHIFT = 3, 541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
538 I40E_RX_DESC_ERROR_L4E_SHIFT = 4, 542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
539 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, 543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
540 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
 544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
545 I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
541}; 546};
542 547
543enum i40e_rx_desc_error_l3l4e_fcoe_masks { 548enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -658,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
658 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, 663 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
659 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ 664 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
660 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ 665 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
661 I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
662 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, 666 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
663 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, 667 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
664 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, 668 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -862,18 +866,14 @@ struct i40e_filter_program_desc {
862 866
863/* Packet Classifier Types for filters */ 867/* Packet Classifier Types for filters */
864enum i40e_filter_pctype { 868enum i40e_filter_pctype {
865 /* Note: Values 0-28 are reserved for future use */
 869 /* Note: Values 0-30 are reserved for future use */
866 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
867 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
868 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, 870 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
869 I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
 871 /* Note: Value 32 is reserved for future use */
870 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, 872 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
871 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, 873 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
872 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, 874 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
873 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, 875 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
874 /* Note: Values 37-38 are reserved for future use */
 876 /* Note: Values 37-40 are reserved for future use */
875 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
876 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, 877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42, 878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, 879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -955,6 +955,16 @@ struct i40e_vsi_context {
955 struct i40e_aqc_vsi_properties_data info; 955 struct i40e_aqc_vsi_properties_data info;
956}; 956};
957 957
958struct i40e_veb_context {
959 u16 seid;
960 u16 uplink_seid;
961 u16 veb_number;
962 u16 vebs_allocated;
963 u16 vebs_unallocated;
964 u16 flags;
965 struct i40e_aqc_get_veb_parameters_completion info;
966};
967
958/* Statistics collected by each port, VSI, VEB, and S-channel */ 968/* Statistics collected by each port, VSI, VEB, and S-channel */
959struct i40e_eth_stats { 969struct i40e_eth_stats {
960 u64 rx_bytes; /* gorc */ 970 u64 rx_bytes; /* gorc */
@@ -962,8 +972,6 @@ struct i40e_eth_stats {
962 u64 rx_multicast; /* mprc */ 972 u64 rx_multicast; /* mprc */
963 u64 rx_broadcast; /* bprc */ 973 u64 rx_broadcast; /* bprc */
964 u64 rx_discards; /* rdpc */ 974 u64 rx_discards; /* rdpc */
965 u64 rx_errors; /* repc */
966 u64 rx_missed; /* rmpc */
967 u64 rx_unknown_protocol; /* rupp */ 975 u64 rx_unknown_protocol; /* rupp */
968 u64 tx_bytes; /* gotc */ 976 u64 tx_bytes; /* gotc */
969 u64 tx_unicast; /* uptc */ 977 u64 tx_unicast; /* uptc */
@@ -1015,9 +1023,12 @@ struct i40e_hw_port_stats {
1015 u64 tx_size_big; /* ptc9522 */ 1023 u64 tx_size_big; /* ptc9522 */
1016 u64 mac_short_packet_dropped; /* mspdc */ 1024 u64 mac_short_packet_dropped; /* mspdc */
1017 u64 checksum_error; /* xec */ 1025 u64 checksum_error; /* xec */
1026 /* flow director stats */
1027 u64 fd_atr_match;
1028 u64 fd_sb_match;
1018 /* EEE LPI */ 1029 /* EEE LPI */
1019 bool tx_lpi_status;
1020 bool rx_lpi_status;
 1030 u32 tx_lpi_status;
 1031 u32 rx_lpi_status;
1021 u64 tx_lpi_count; /* etlpic */ 1032 u64 tx_lpi_count; /* etlpic */
1022 u64 rx_lpi_count; /* erlpic */ 1033 u64 rx_lpi_count; /* erlpic */
1023}; 1034};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 22a1b69cd646..70951d2edcad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
341 int severity; 341 int severity;
342}; 342};
343 343
344/* The following are TBD, not necessary for LAN functionality.
345 * I40E_VIRTCHNL_OP_FCOE
346 */
347
348/* VF reset states - these are written into the RSTAT register: 344/* VF reset states - these are written into the RSTAT register:
349 * I40E_VFGEN_RSTAT1 on the PF 345 * I40E_VFGEN_RSTAT1 on the PF
350 * I40E_VFGEN_RSTAT on the VF 346 * I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7f7d29..f5b9d2062573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
29/***********************misc routines*****************************/ 29/***********************misc routines*****************************/
30 30
31/** 31/**
32 * i40e_vc_disable_vf
33 * @pf: pointer to the pf info
34 * @vf: pointer to the vf info
35 *
36 * Disable the VF through a SW reset
37 **/
38static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
39{
40 struct i40e_hw *hw = &pf->hw;
41 u32 reg;
42
43 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
44 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
45 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
46 i40e_flush(hw);
47}
48
49/**
32 * i40e_vc_isvalid_vsi_id 50 * i40e_vc_isvalid_vsi_id
33 * @vf: pointer to the vf info 51 * @vf: pointer to the vf info
34 * @vsi_id: vf relative vsi id 52 * @vsi_id: vf relative vsi id
@@ -230,9 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
230 tx_ctx.qlen = info->ring_len; 248 tx_ctx.qlen = info->ring_len;
231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 249 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
232 tx_ctx.rdylist_act = 0; 250 tx_ctx.rdylist_act = 0;
233 tx_ctx.head_wb_ena = 1; 251 tx_ctx.head_wb_ena = info->headwb_enabled;
234 tx_ctx.head_wb_addr = info->dma_ring_addr + 252 tx_ctx.head_wb_addr = info->dma_headwb_addr;
235 (info->ring_len * sizeof(struct i40e_tx_desc));
236 253
237 /* clear the context in the HMC */ 254 /* clear the context in the HMC */
238 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 255 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -336,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
336 rx_ctx.tphhead_ena = 1; 353 rx_ctx.tphhead_ena = 1;
337 rx_ctx.lrxqthresh = 2; 354 rx_ctx.lrxqthresh = 2;
338 rx_ctx.crcstrip = 1; 355 rx_ctx.crcstrip = 1;
356 rx_ctx.prefena = 1;
339 357
340 /* clear the context in the HMC */ 358 /* clear the context in the HMC */
341 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 359 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
416 if (ret) 434 if (ret)
417 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 435 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
418 436
437 /* Set VF bandwidth if specified */
438 if (vf->tx_rate) {
439 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
440 vf->tx_rate / 50, 0, NULL);
441 if (ret)
442 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
443 vf->vf_id, ret);
444 }
445
419error_alloc_vsi_res: 446error_alloc_vsi_res:
420 return ret; 447 return ret;
421} 448}
@@ -815,6 +842,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
815 kfree(pf->vf); 842 kfree(pf->vf);
816 pf->vf = NULL; 843 pf->vf = NULL;
817 844
845 /* This check is for when the driver is unloaded while VFs are
846 * assigned. Setting the number of VFs to 0 through sysfs is caught
847 * before this function ever gets called.
848 */
818 if (!i40e_vfs_are_assigned(pf)) { 849 if (!i40e_vfs_are_assigned(pf)) {
819 pci_disable_sriov(pf->pdev); 850 pci_disable_sriov(pf->pdev);
820 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 851 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -867,6 +898,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
867 ret = -ENOMEM; 898 ret = -ENOMEM;
868 goto err_alloc; 899 goto err_alloc;
869 } 900 }
901 pf->vf = vfs;
870 902
871 /* apply default profile */ 903 /* apply default profile */
872 for (i = 0; i < num_alloc_vfs; i++) { 904 for (i = 0; i < num_alloc_vfs; i++) {
@@ -876,13 +908,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
876 908
877 /* assign default capabilities */ 909 /* assign default capabilities */
878 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 910 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
911 vfs[i].spoofchk = true;
879 /* vf resources get allocated during reset */ 912 /* vf resources get allocated during reset */
880 i40e_reset_vf(&vfs[i], false); 913 i40e_reset_vf(&vfs[i], false);
881 914
882 /* enable vf vplan_qtable mappings */ 915 /* enable vf vplan_qtable mappings */
883 i40e_enable_vf_mappings(&vfs[i]); 916 i40e_enable_vf_mappings(&vfs[i]);
884 } 917 }
885 pf->vf = vfs;
886 pf->num_alloc_vfs = num_alloc_vfs; 918 pf->num_alloc_vfs = num_alloc_vfs;
887 919
888 i40e_enable_pf_switch_lb(pf); 920 i40e_enable_pf_switch_lb(pf);
@@ -951,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
951 if (num_vfs) 983 if (num_vfs)
952 return i40e_pci_sriov_enable(pdev, num_vfs); 984 return i40e_pci_sriov_enable(pdev, num_vfs);
953 985
954 i40e_free_vfs(pf); 986 if (!i40e_vfs_are_assigned(pf)) {
987 i40e_free_vfs(pf);
988 } else {
989 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
990 return -EINVAL;
991 }
955 return 0; 992 return 0;
956} 993}
957 994
@@ -2022,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2022 } 2059 }
2023 2060
2024 /* delete the temporary mac address */ 2061 /* delete the temporary mac address */
2025 i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false); 2062 i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
2063 true, false);
2026 2064
2027 /* add the new mac address */ 2065 /* Delete all the filters for this VSI - we're going to kill it
2028 f = i40e_add_filter(vsi, mac, 0, true, false); 2066 * anyway.
2029 if (!f) { 2067 */
2030 dev_err(&pf->pdev->dev, 2068 list_for_each_entry(f, &vsi->mac_filter_list, list)
2031 "Unable to add VF ucast filter\n"); 2069 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2032 ret = -ENOMEM;
2033 goto error_param;
2034 }
2035 2070
 2036 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 2071 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
 2037 /* program mac filter */ 2072 /* program mac filter */
@@ -2040,7 +2075,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2040 ret = -EIO; 2075 ret = -EIO;
2041 goto error_param; 2076 goto error_param;
2042 } 2077 }
2043 memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN); 2078 ether_addr_copy(vf->default_lan_addr.addr, mac);
2044 vf->pf_set_mac = true; 2079 vf->pf_set_mac = true;
2045 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2080 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2046 ret = 0; 2081 ret = 0;
@@ -2088,18 +2123,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2088 goto error_pvid; 2123 goto error_pvid;
2089 } 2124 }
2090 2125
2091 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) 2126 if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
2092 dev_err(&pf->pdev->dev, 2127 dev_err(&pf->pdev->dev,
2093 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", 2128 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2094 vf_id); 2129 vf_id);
 2130 /* Administrator error - knock the VF offline until the
 2131 * administrator reconfigures the network correctly and then
 2132 * reloads the VF driver.
 2133 */
2134 i40e_vc_disable_vf(pf, vf);
2135 }
2095 2136
2096 /* Check for condition where there was already a port VLAN ID 2137 /* Check for condition where there was already a port VLAN ID
2097 * filter set and now it is being deleted by setting it to zero. 2138 * filter set and now it is being deleted by setting it to zero.
2139 * Additionally check for the condition where there was a port
2140 * VLAN but now there is a new and different port VLAN being set.
2098 * Before deleting all the old VLAN filters we must add new ones 2141 * Before deleting all the old VLAN filters we must add new ones
2099 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our 2142 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2100 * MAC addresses deleted. 2143 * MAC addresses deleted.
2101 */ 2144 */
2102 if (!(vlan_id || qos) && vsi->info.pvid) 2145 if ((!(vlan_id || qos) ||
2146 (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
2147 vsi->info.pvid)
2103 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); 2148 ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
2104 2149
2105 if (vsi->info.pvid) { 2150 if (vsi->info.pvid) {
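The block comment above encodes an ordering rule worth spelling out: VSI MAC filters are keyed by (MAC, VLAN) pairs, as the i40e_del_filter(vsi, f->macaddr, f->vlan, ...) calls earlier in this file show, so the filters tied to an old port VLAN may only be removed after duplicates keyed to I40E_VLAN_ANY (-1) exist, or the MAC addresses vanish with them. A minimal sketch of that order, using only i40e_vsi_add_vlan() from this hunk; the deletion of the old PVID filters is left as a comment because that helper is outside this diff:

/* Sketch only (driver context, not standalone): preserve MAC
 * reachability across a port VLAN change.
 */
static int pvid_change_order(struct i40e_vsi *vsi)
{
	int ret;

	/* 1) duplicate every MAC filter under the wildcard VLAN id first */
	ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
	if (ret)
		return ret;

	/* 2) only now remove the filters keyed to the old vsi->info.pvid
	 *    and program the new port VLAN; that step is performed by the
	 *    driver code that follows.
	 */
	return 0;
}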
@@ -2150,6 +2195,8 @@ error_pvid:
2150 return ret; 2195 return ret;
2151} 2196}
2152 2197
2198#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
2199#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
2153/** 2200/**
2154 * i40e_ndo_set_vf_bw 2201 * i40e_ndo_set_vf_bw
2155 * @netdev: network interface device structure 2202 * @netdev: network interface device structure
@@ -2158,9 +2205,76 @@ error_pvid:
2158 * 2205 *
2159 * configure vf tx rate 2206 * configure vf tx rate
2160 **/ 2207 **/
2161int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) 2208int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2209 int max_tx_rate)
2162{ 2210{
2163 return -EOPNOTSUPP; 2211 struct i40e_netdev_priv *np = netdev_priv(netdev);
2212 struct i40e_pf *pf = np->vsi->back;
2213 struct i40e_vsi *vsi;
2214 struct i40e_vf *vf;
2215 int speed = 0;
2216 int ret = 0;
2217
2218 /* validate the request */
2219 if (vf_id >= pf->num_alloc_vfs) {
2220 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
2221 ret = -EINVAL;
2222 goto error;
2223 }
2224
2225 if (min_tx_rate) {
 2226 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) specified for vf %d; only 0 is supported.\n",
2227 min_tx_rate, vf_id);
2228 return -EINVAL;
2229 }
2230
2231 vf = &(pf->vf[vf_id]);
2232 vsi = pf->vsi[vf->lan_vsi_index];
2233 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2234 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
2235 ret = -EINVAL;
2236 goto error;
2237 }
2238
2239 switch (pf->hw.phy.link_info.link_speed) {
2240 case I40E_LINK_SPEED_40GB:
2241 speed = 40000;
2242 break;
2243 case I40E_LINK_SPEED_10GB:
2244 speed = 10000;
2245 break;
2246 case I40E_LINK_SPEED_1GB:
2247 speed = 1000;
2248 break;
2249 default:
2250 break;
2251 }
2252
2253 if (max_tx_rate > speed) {
 2254 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.\n",
2255 max_tx_rate, vf->vf_id);
2256 ret = -EINVAL;
2257 goto error;
2258 }
2259
2260 if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
2261 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2262 max_tx_rate = 50;
2263 }
2264
 2265 /* Tx rate credits are in units of 50 Mbps; 0 disables the limit */
2266 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
2267 max_tx_rate / I40E_BW_CREDIT_DIVISOR,
2268 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
2269 if (ret) {
2270 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
2271 ret);
2272 ret = -EIO;
2273 goto error;
2274 }
2275 vf->tx_rate = max_tx_rate;
2276error:
2277 return ret;
2164} 2278}
2165 2279
2166/** 2280/**
@@ -2200,10 +2314,18 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2200 2314
2201 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); 2315 memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
2202 2316
2203 ivi->tx_rate = 0; 2317 ivi->max_tx_rate = vf->tx_rate;
2318 ivi->min_tx_rate = 0;
2204 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; 2319 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2205 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> 2320 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2206 I40E_VLAN_PRIORITY_SHIFT; 2321 I40E_VLAN_PRIORITY_SHIFT;
2322 if (vf->link_forced == false)
2323 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2324 else if (vf->link_up == true)
2325 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2326 else
2327 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2328 ivi->spoofchk = vf->spoofchk;
2207 ret = 0; 2329 ret = 0;
2208 2330
2209error_param: 2331error_param:
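The max_tx_rate handling in i40e_ndo_set_vf_bw() above boils down to a small conversion: the firmware takes the limit in 50 Mbps credits (I40E_BW_CREDIT_DIVISOR), requests between 1 and 49 Mbps are rounded up to the minimum usable 50 Mbps, rates above the negotiated link speed are rejected, and 0 disables the limiter. A standalone sketch of that arithmetic, using only constants visible in this hunk (-1 stands in for -EINVAL, and link_speed_mbps for the value derived from pf->hw.phy.link_info.link_speed):

#include <stdio.h>

#define I40E_BW_CREDIT_DIVISOR 50	/* 50 Mbps per BW credit */

/* Mirrors the clamping and conversion done in i40e_ndo_set_vf_bw(). */
static int mbps_to_bw_credits(int max_tx_rate, int link_speed_mbps)
{
	if (max_tx_rate > link_speed_mbps)
		return -1;				/* rejected above with -EINVAL */
	if (max_tx_rate > 0 && max_tx_rate < I40E_BW_CREDIT_DIVISOR)
		max_tx_rate = I40E_BW_CREDIT_DIVISOR;	/* minimum usable rate */
	return max_tx_rate / I40E_BW_CREDIT_DIVISOR;	/* 0 disables the limit */
}

int main(void)
{
	/* 120 Mbps on a 10G link -> 2 credits, an effective 100 Mbps cap */
	printf("credits: %d\n", mbps_to_bw_credits(120, 10000));
	return 0;
}

The truncation in the last line is why the device-visible cap can be lower than the requested rate: credits are whole 50 Mbps units, and the I40E_MAX_BW_INACTIVE_ACCUM argument (4 credits) only bounds how much unused budget may accumulate for bursting.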
@@ -2270,3 +2392,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2270error_out: 2392error_out:
2271 return ret; 2393 return ret;
2272} 2394}
2395
2396/**
 2397 * i40e_ndo_set_vf_spoofck
2398 * @netdev: network interface device structure
2399 * @vf_id: vf identifier
2400 * @enable: flag to enable or disable feature
2401 *
2402 * Enable or disable VF spoof checking
2403 **/
2404int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
2405{
2406 struct i40e_netdev_priv *np = netdev_priv(netdev);
2407 struct i40e_vsi *vsi = np->vsi;
2408 struct i40e_pf *pf = vsi->back;
2409 struct i40e_vsi_context ctxt;
2410 struct i40e_hw *hw = &pf->hw;
2411 struct i40e_vf *vf;
2412 int ret = 0;
2413
2414 /* validate the request */
2415 if (vf_id >= pf->num_alloc_vfs) {
2416 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2417 ret = -EINVAL;
2418 goto out;
2419 }
2420
2421 vf = &(pf->vf[vf_id]);
2422
2423 if (enable == vf->spoofchk)
2424 goto out;
2425
2426 vf->spoofchk = enable;
2427 memset(&ctxt, 0, sizeof(ctxt));
2428 ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
2429 ctxt.pf_num = pf->hw.pf_id;
2430 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2431 if (enable)
2432 ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
2433 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2434 if (ret) {
2435 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2436 ret);
2437 ret = -EIO;
2438 }
2439out:
2440 return ret;
2441}
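These handlers only take effect once they are plugged into the PF netdev's operations table; that wiring lives in i40e_main.c, which is not part of this section, so the following is an assumption about the hookup rather than a quote of it. The kernel's net_device_ops does carry callbacks with exactly the signatures used here, including the new min/max form of ndo_set_vf_rate (note the table field is ndo_set_vf_spoofchk even though the driver function is spelled i40e_ndo_set_vf_spoofck):

/* Hypothetical excerpt of the i40e_main.c wiring (not shown in this diff). */
static const struct net_device_ops i40e_netdev_ops_sketch = {
	/* ... */
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,	/* min/max signature */
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofck,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	/* ... */
};

From userspace these surface through the usual rtnetlink path, e.g. "ip link set <pf> vf 0 rate 120" with a new-enough iproute2, and the values set here are what i40e_ndo_get_vf_config() reports back.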
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f396d5..63e7e0d81ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,8 +98,10 @@ struct i40e_vf {
98 98
99 unsigned long vf_caps; /* vf's adv. capabilities */ 99 unsigned long vf_caps; /* vf's adv. capabilities */
100 unsigned long vf_states; /* vf's runtime states */ 100 unsigned long vf_states; /* vf's runtime states */
101 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
101 bool link_forced; 102 bool link_forced;
102 bool link_up; /* only valid if vf link is forced */ 103 bool link_up; /* only valid if vf link is forced */
104 bool spoofchk;
103}; 105};
104 106
105void i40e_free_vfs(struct i40e_pf *pf); 107void i40e_free_vfs(struct i40e_pf *pf);
@@ -115,10 +117,12 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
115int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); 117int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
116int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, 118int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
117 int vf_id, u16 vlan_id, u8 qos); 119 int vf_id, u16 vlan_id, u8 qos);
118int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); 120int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
121 int max_tx_rate);
119int i40e_ndo_get_vf_config(struct net_device *netdev, 122int i40e_ndo_get_vf_config(struct net_device *netdev,
120 int vf_id, struct ifla_vf_info *ivi); 123 int vf_id, struct ifla_vf_info *ivi);
121int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); 124int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
125int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable);
122 126
123void i40e_vc_notify_link_state(struct i40e_pf *pf); 127void i40e_vc_notify_link_state(struct i40e_pf *pf);
124void i40e_vc_notify_reset(struct i40e_pf *pf); 128void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index e09be37a07a8..3a423836a565 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4# Copyright(c) 2013 Intel Corporation. 4# Copyright(c) 2013 - 2014 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details. 13# more details.
14# 14#
15# You should have received a copy of the GNU General Public License along
16# with this program. If not, see <http://www.gnu.org/licenses/>.
17#
15# The full GNU General Public License is included in this distribution in 18# The full GNU General Public License is included in this distribution in
16# the file called "COPYING". 19# the file called "COPYING".
17# 20#
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce95936e..eb67cce3e8f9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -28,6 +31,16 @@
28#include "i40e_prototype.h" 31#include "i40e_prototype.h"
29 32
30/** 33/**
34 * i40e_is_nvm_update_op - return true if this is an NVM update operation
35 * @desc: API request descriptor
36 **/
37static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
38{
39 return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
40 (desc->opcode == i40e_aqc_opc_nvm_update);
41}
42
43/**
31 * i40e_adminq_init_regs - Initialize AdminQ registers 44 * i40e_adminq_init_regs - Initialize AdminQ registers
32 * @hw: pointer to the hardware structure 45 * @hw: pointer to the hardware structure
33 * 46 *
@@ -276,8 +289,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
276 * 289 *
277 * Configure base address and length registers for the transmit queue 290 * Configure base address and length registers for the transmit queue
278 **/ 291 **/
279static void i40e_config_asq_regs(struct i40e_hw *hw) 292static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
280{ 293{
294 i40e_status ret_code = 0;
295 u32 reg = 0;
296
281 if (hw->mac.type == I40E_MAC_VF) { 297 if (hw->mac.type == I40E_MAC_VF) {
282 /* configure the transmit queue */ 298 /* configure the transmit queue */
283 wr32(hw, I40E_VF_ATQBAH1, 299 wr32(hw, I40E_VF_ATQBAH1,
@@ -286,6 +302,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
286 lower_32_bits(hw->aq.asq.desc_buf.pa)); 302 lower_32_bits(hw->aq.asq.desc_buf.pa));
287 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries | 303 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
288 I40E_VF_ATQLEN1_ATQENABLE_MASK)); 304 I40E_VF_ATQLEN1_ATQENABLE_MASK));
305 reg = rd32(hw, I40E_VF_ATQBAL1);
289 } else { 306 } else {
290 /* configure the transmit queue */ 307 /* configure the transmit queue */
291 wr32(hw, I40E_PF_ATQBAH, 308 wr32(hw, I40E_PF_ATQBAH,
@@ -294,7 +311,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
294 lower_32_bits(hw->aq.asq.desc_buf.pa)); 311 lower_32_bits(hw->aq.asq.desc_buf.pa));
295 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries | 312 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
296 I40E_PF_ATQLEN_ATQENABLE_MASK)); 313 I40E_PF_ATQLEN_ATQENABLE_MASK));
314 reg = rd32(hw, I40E_PF_ATQBAL);
297 } 315 }
316
317 /* Check one register to verify that config was applied */
318 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
319 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
320
321 return ret_code;
298} 322}
299 323
300/** 324/**
@@ -303,8 +327,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
303 * 327 *
304 * Configure base address and length registers for the receive (event queue) 328 * Configure base address and length registers for the receive (event queue)
305 **/ 329 **/
306static void i40e_config_arq_regs(struct i40e_hw *hw) 330static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
307{ 331{
332 i40e_status ret_code = 0;
333 u32 reg = 0;
334
308 if (hw->mac.type == I40E_MAC_VF) { 335 if (hw->mac.type == I40E_MAC_VF) {
309 /* configure the receive queue */ 336 /* configure the receive queue */
310 wr32(hw, I40E_VF_ARQBAH1, 337 wr32(hw, I40E_VF_ARQBAH1,
@@ -313,6 +340,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
313 lower_32_bits(hw->aq.arq.desc_buf.pa)); 340 lower_32_bits(hw->aq.arq.desc_buf.pa));
314 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries | 341 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
315 I40E_VF_ARQLEN1_ARQENABLE_MASK)); 342 I40E_VF_ARQLEN1_ARQENABLE_MASK));
343 reg = rd32(hw, I40E_VF_ARQBAL1);
316 } else { 344 } else {
317 /* configure the receive queue */ 345 /* configure the receive queue */
318 wr32(hw, I40E_PF_ARQBAH, 346 wr32(hw, I40E_PF_ARQBAH,
@@ -321,10 +349,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
321 lower_32_bits(hw->aq.arq.desc_buf.pa)); 349 lower_32_bits(hw->aq.arq.desc_buf.pa));
322 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries | 350 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
323 I40E_PF_ARQLEN_ARQENABLE_MASK)); 351 I40E_PF_ARQLEN_ARQENABLE_MASK));
352 reg = rd32(hw, I40E_PF_ARQBAL);
324 } 353 }
325 354
326 /* Update tail in the HW to post pre-allocated buffers */ 355 /* Update tail in the HW to post pre-allocated buffers */
327 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 356 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
357
358 /* Check one register to verify that config was applied */
359 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
360 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
361
362 return ret_code;
328} 363}
329 364
330/** 365/**
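i40e_config_asq_regs() and i40e_config_arq_regs() now return a status: after programming the queue base and length registers they read one base-address register back and compare it with the DMA address just written, so a dead BAR or unlatched configuration fails AdminQ init immediately instead of surfacing later as a hung queue. The pattern in isolation, with a toy register file standing in for the device (a sketch, not the driver's rd32/wr32, and the offsets are arbitrary stand-ins for ATQBAL/ATQBAH):

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the device BAR (offsets are /4). */
static uint32_t regs[64];
static void wr32(unsigned int reg, uint32_t val) { regs[reg / 4] = val; }
static uint32_t rd32(unsigned int reg) { return regs[reg / 4]; }

/* Program a queue base address, then read one register back to confirm
 * the device latched it: the check the new i40e_config_asq_regs()/
 * i40e_config_arq_regs() return paths add.
 */
static int config_queue_base(unsigned int bal, unsigned int bah, uint64_t pa)
{
	wr32(bah, (uint32_t)(pa >> 32));
	wr32(bal, (uint32_t)pa);

	if (rd32(bal) != (uint32_t)pa)
		return -1;	/* I40E_ERR_ADMIN_QUEUE_ERROR in the driver */
	return 0;
}

int main(void)
{
	printf("status: %d\n", config_queue_base(0x00, 0x04, 0x12345678abcdULL));
	return 0;
}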
@@ -372,7 +407,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
372 goto init_adminq_free_rings; 407 goto init_adminq_free_rings;
373 408
374 /* initialize base registers */ 409 /* initialize base registers */
375 i40e_config_asq_regs(hw); 410 ret_code = i40e_config_asq_regs(hw);
411 if (ret_code)
412 goto init_adminq_free_rings;
376 413
377 /* success! */ 414 /* success! */
378 goto init_adminq_exit; 415 goto init_adminq_exit;
@@ -429,7 +466,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
429 goto init_adminq_free_rings; 466 goto init_adminq_free_rings;
430 467
431 /* initialize base registers */ 468 /* initialize base registers */
432 i40e_config_arq_regs(hw); 469 ret_code = i40e_config_arq_regs(hw);
470 if (ret_code)
471 goto init_adminq_free_rings;
433 472
434 /* success! */ 473 /* success! */
435 goto init_adminq_exit; 474 goto init_adminq_exit;
@@ -659,6 +698,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
659 goto asq_send_command_exit; 698 goto asq_send_command_exit;
660 } 699 }
661 700
701 if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
702 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
703 status = I40E_ERR_NVM;
704 goto asq_send_command_exit;
705 }
706
662 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 707 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
663 if (cmd_details) { 708 if (cmd_details) {
664 *details = *cmd_details; 709 *details = *cmd_details;
@@ -786,6 +831,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
786 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; 831 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
787 } 832 }
788 833
834 if (i40e_is_nvm_update_op(desc))
835 hw->aq.nvm_busy = true;
836
789 /* update the error if time out occurred */ 837 /* update the error if time out occurred */
790 if ((!cmd_completed) && 838 if ((!cmd_completed) &&
791 (!details->async && !details->postpone)) { 839 (!details->async && !details->postpone)) {
@@ -880,6 +928,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
880 e->msg_size); 928 e->msg_size);
881 } 929 }
882 930
931 if (i40e_is_nvm_update_op(&e->desc))
932 hw->aq.nvm_busy = false;
933
883 /* Restore the original datalen and buffer address in the desc, 934 /* Restore the original datalen and buffer address in the desc,
884 * FW updates datalen to indicate the event message 935 * FW updates datalen to indicate the event message
885 * size 936 * size
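Taken together, the nvm_busy hunks above implement a simple busy flag over the admin queue: posting an NVM erase/update marks the queue busy, any further NVM command is bounced with I40E_ERR_NVM, and the completion event seen by the receive-queue cleaner clears the flag. Condensed into one self-contained sketch (the real driver keeps these pieces in i40evf_asq_send_command() and i40evf_clean_arq_element(), and -1 stands in for I40E_ERR_NVM):

#include <stdbool.h>
#include <stdio.h>

struct adminq { bool nvm_busy; };

/* Opcode values from i40e_adminq_cmd.h in this diff. */
static bool is_nvm_update_op(int opcode)
{
	return opcode == 0x0702 /* nvm_erase */ ||
	       opcode == 0x0703 /* nvm_update */;
}

/* Send path: bounce NVM commands while one is outstanding, otherwise
 * post the descriptor and raise the busy flag.
 */
static int asq_send(struct adminq *aq, int opcode)
{
	if (is_nvm_update_op(opcode) && aq->nvm_busy)
		return -1;
	/* ... descriptor is posted to hardware here ... */
	if (is_nvm_update_op(opcode))
		aq->nvm_busy = true;
	return 0;
}

/* Receive-queue cleaner: the completion event re-opens the NVM path. */
static void arq_clean(struct adminq *aq, int completed_opcode)
{
	if (is_nvm_update_op(completed_opcode))
		aq->nvm_busy = false;
}

int main(void)
{
	struct adminq aq = { .nvm_busy = false };

	printf("first update:  %d\n", asq_send(&aq, 0x0703));	/* 0: accepted */
	printf("second update: %d\n", asq_send(&aq, 0x0702));	/* -1: busy */
	arq_clean(&aq, 0x0703);
	printf("after clean:   %d\n", asq_send(&aq, 0x0702));	/* 0 again */
	return 0;
}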
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d95cc..e3472c62e155 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -87,6 +90,7 @@ struct i40e_adminq_info {
87 u16 fw_min_ver; /* firmware minor version */ 90 u16 fw_min_ver; /* firmware minor version */
88 u16 api_maj_ver; /* api major version */ 91 u16 api_maj_ver; /* api major version */
89 u16 api_min_ver; /* api minor version */ 92 u16 api_min_ver; /* api minor version */
93 bool nvm_busy;
90 94
91 struct mutex asq_mutex; /* Send queue lock */ 95 struct mutex asq_mutex; /* Send queue lock */
92 struct mutex arq_mutex; /* Receive queue lock */ 96 struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6bd98a..e656ea7a7920 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -31,7 +34,7 @@
31 */ 34 */
32 35
33#define I40E_FW_API_VERSION_MAJOR 0x0001 36#define I40E_FW_API_VERSION_MAJOR 0x0001
34#define I40E_FW_API_VERSION_MINOR 0x0001 37#define I40E_FW_API_VERSION_MINOR 0x0002
35#define I40E_FW_API_VERSION_A0_MINOR 0x0000 38#define I40E_FW_API_VERSION_A0_MINOR 0x0000
36 39
37struct i40e_aq_desc { 40struct i40e_aq_desc {
@@ -121,6 +124,7 @@ enum i40e_admin_queue_opc {
121 i40e_aqc_opc_get_version = 0x0001, 124 i40e_aqc_opc_get_version = 0x0001,
122 i40e_aqc_opc_driver_version = 0x0002, 125 i40e_aqc_opc_driver_version = 0x0002,
123 i40e_aqc_opc_queue_shutdown = 0x0003, 126 i40e_aqc_opc_queue_shutdown = 0x0003,
127 i40e_aqc_opc_set_pf_context = 0x0004,
124 128
125 /* resource ownership */ 129 /* resource ownership */
126 i40e_aqc_opc_request_resource = 0x0008, 130 i40e_aqc_opc_request_resource = 0x0008,
@@ -180,9 +184,6 @@ enum i40e_admin_queue_opc {
180 i40e_aqc_opc_add_mirror_rule = 0x0260, 184 i40e_aqc_opc_add_mirror_rule = 0x0260,
181 i40e_aqc_opc_delete_mirror_rule = 0x0261, 185 i40e_aqc_opc_delete_mirror_rule = 0x0261,
182 186
183 i40e_aqc_opc_set_storm_control_config = 0x0280,
184 i40e_aqc_opc_get_storm_control_config = 0x0281,
185
186 /* DCB commands */ 187 /* DCB commands */
187 i40e_aqc_opc_dcb_ignore_pfc = 0x0301, 188 i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
188 i40e_aqc_opc_dcb_updated = 0x0302, 189 i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +206,7 @@ enum i40e_admin_queue_opc {
205 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, 206 i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
206 i40e_aqc_opc_suspend_port_tx = 0x041B, 207 i40e_aqc_opc_suspend_port_tx = 0x041B,
207 i40e_aqc_opc_resume_port_tx = 0x041C, 208 i40e_aqc_opc_resume_port_tx = 0x041C,
209 i40e_aqc_opc_configure_partition_bw = 0x041D,
208 210
209 /* hmc */ 211 /* hmc */
210 i40e_aqc_opc_query_hmc_resource_profile = 0x0500, 212 i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -222,13 +224,15 @@ enum i40e_admin_queue_opc {
222 i40e_aqc_opc_get_partner_advt = 0x0616, 224 i40e_aqc_opc_get_partner_advt = 0x0616,
223 i40e_aqc_opc_set_lb_modes = 0x0618, 225 i40e_aqc_opc_set_lb_modes = 0x0618,
224 i40e_aqc_opc_get_phy_wol_caps = 0x0621, 226 i40e_aqc_opc_get_phy_wol_caps = 0x0621,
225 i40e_aqc_opc_set_phy_reset = 0x0622, 227 i40e_aqc_opc_set_phy_debug = 0x0622,
226 i40e_aqc_opc_upload_ext_phy_fm = 0x0625, 228 i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
227 229
228 /* NVM commands */ 230 /* NVM commands */
229 i40e_aqc_opc_nvm_read = 0x0701, 231 i40e_aqc_opc_nvm_read = 0x0701,
230 i40e_aqc_opc_nvm_erase = 0x0702, 232 i40e_aqc_opc_nvm_erase = 0x0702,
231 i40e_aqc_opc_nvm_update = 0x0703, 233 i40e_aqc_opc_nvm_update = 0x0703,
234 i40e_aqc_opc_nvm_config_read = 0x0704,
235 i40e_aqc_opc_nvm_config_write = 0x0705,
232 236
233 /* virtualization commands */ 237 /* virtualization commands */
234 i40e_aqc_opc_send_msg_to_pf = 0x0801, 238 i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,8 +274,6 @@ enum i40e_admin_queue_opc {
270 i40e_aqc_opc_debug_set_mode = 0xFF01, 274 i40e_aqc_opc_debug_set_mode = 0xFF01,
271 i40e_aqc_opc_debug_read_reg = 0xFF03, 275 i40e_aqc_opc_debug_read_reg = 0xFF03,
272 i40e_aqc_opc_debug_write_reg = 0xFF04, 276 i40e_aqc_opc_debug_write_reg = 0xFF04,
273 i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
274 i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
275 i40e_aqc_opc_debug_modify_reg = 0xFF07, 277 i40e_aqc_opc_debug_modify_reg = 0xFF07,
276 i40e_aqc_opc_debug_dump_internals = 0xFF08, 278 i40e_aqc_opc_debug_dump_internals = 0xFF08,
277 i40e_aqc_opc_debug_modify_internals = 0xFF09, 279 i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -339,6 +341,14 @@ struct i40e_aqc_queue_shutdown {
339 341
340I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); 342I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
341 343
344/* Set PF context (0x0004, direct) */
345struct i40e_aqc_set_pf_context {
346 u8 pf_id;
347 u8 reserved[15];
348};
349
350I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
351
342/* Request resource ownership (direct 0x0008) 352/* Request resource ownership (direct 0x0008)
343 * Release resource ownership (direct 0x0009) 353 * Release resource ownership (direct 0x0009)
344 */ 354 */
@@ -678,7 +688,6 @@ struct i40e_aqc_add_get_update_vsi {
678#define I40E_AQ_VSI_TYPE_PF 0x2 688#define I40E_AQ_VSI_TYPE_PF 0x2
679#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 689#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
680#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 690#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
681#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
682 __le32 addr_high; 691 __le32 addr_high;
683 __le32 addr_low; 692 __le32 addr_low;
684}; 693};
@@ -1040,7 +1049,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
1040#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 1049#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
1041 __le16 seid; 1050 __le16 seid;
1042#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF 1051#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
1043 u8 reserved[10]; 1052 __le16 vlan_tag;
1053#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
1054 u8 reserved[8];
1044}; 1055};
1045 1056
1046I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); 1057I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1289,27 +1300,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
1289 1300
1290I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); 1301I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
1291 1302
1292/* Set Storm Control Configuration (direct 0x0280)
1293 * Get Storm Control Configuration (direct 0x0281)
1294 * the command and response use the same descriptor structure
1295 */
1296struct i40e_aqc_set_get_storm_control_config {
1297 __le32 broadcast_threshold;
1298 __le32 multicast_threshold;
1299 __le32 control_flags;
1300#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
1301#define I40E_AQC_STORM_CONTROL_MDICW 0x02
1302#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
1303#define I40E_AQC_STORM_CONTROL_BDICW 0x08
1304#define I40E_AQC_STORM_CONTROL_BIDU 0x10
1305#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
1306#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
1307 I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
1308 u8 reserved[4];
1309};
1310
1311I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
1312
1313/* DCB 0x03xx*/ 1303/* DCB 0x03xx*/
1314 1304
1315/* PFC Ignore (direct 0x0301) 1305/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1417,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
1427struct i40e_aqc_configure_switching_comp_ets_data { 1417struct i40e_aqc_configure_switching_comp_ets_data {
1428 u8 reserved[4]; 1418 u8 reserved[4];
1429 u8 tc_valid_bits; 1419 u8 tc_valid_bits;
1430 u8 reserved1; 1420 u8 seepage;
1421#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
1431 u8 tc_strict_priority_flags; 1422 u8 tc_strict_priority_flags;
1432 u8 reserved2[17]; 1423 u8 reserved1[17];
1433 u8 tc_bw_share_credits[8]; 1424 u8 tc_bw_share_credits[8];
1434 u8 reserved3[96]; 1425 u8 reserved2[96];
1435}; 1426};
1436 1427
1437/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1428/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1490,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1499 * (direct 0x041B and 0x041C) uses the generic SEID struct 1490 * (direct 0x041B and 0x041C) uses the generic SEID struct
1500 */ 1491 */
1501 1492
1493/* Configure partition BW
1494 * (indirect 0x041D)
1495 */
1496struct i40e_aqc_configure_partition_bw_data {
1497 __le16 pf_valid_bits;
1498 u8 min_bw[16]; /* guaranteed bandwidth */
1499 u8 max_bw[16]; /* bandwidth limit */
1500};
1501
1502/* Get and set the active HMC resource profile and status. 1502/* Get and set the active HMC resource profile and status.
1503 * (direct 0x0500) and (direct 0x0501) 1503 * (direct 0x0500) and (direct 0x0501)
1504 */ 1504 */
@@ -1539,6 +1539,8 @@ enum i40e_aq_phy_type {
1539 I40E_PHY_TYPE_XLPPI = 0x9, 1539 I40E_PHY_TYPE_XLPPI = 0x9,
1540 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, 1540 I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
1541 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, 1541 I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
1542 I40E_PHY_TYPE_10GBASE_AOC = 0xC,
1543 I40E_PHY_TYPE_40GBASE_AOC = 0xD,
1542 I40E_PHY_TYPE_100BASE_TX = 0x11, 1544 I40E_PHY_TYPE_100BASE_TX = 0x11,
1543 I40E_PHY_TYPE_1000BASE_T = 0x12, 1545 I40E_PHY_TYPE_1000BASE_T = 0x12,
1544 I40E_PHY_TYPE_10GBASE_T = 0x13, 1546 I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1551,10 @@ enum i40e_aq_phy_type {
1549 I40E_PHY_TYPE_40GBASE_CR4 = 0x18, 1551 I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
1550 I40E_PHY_TYPE_40GBASE_SR4 = 0x19, 1552 I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
1551 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, 1553 I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
1552 I40E_PHY_TYPE_20GBASE_KR2 = 0x1B, 1554 I40E_PHY_TYPE_1000BASE_SX = 0x1B,
1555 I40E_PHY_TYPE_1000BASE_LX = 0x1C,
1556 I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
1557 I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
1553 I40E_PHY_TYPE_MAX 1558 I40E_PHY_TYPE_MAX
1554}; 1559};
1555 1560
@@ -1583,11 +1588,8 @@ struct i40e_aq_get_phy_abilities_resp {
1583#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 1588#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
1584#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 1589#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
1585#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 1590#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
1586#define I40E_AQ_PHY_FLAG_AN_SHIFT 3 1591#define I40E_AQ_PHY_LINK_ENABLED 0x08
1587#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT) 1592#define I40E_AQ_PHY_AN_ENABLED 0x10
1588#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
1589#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
1590#define I40E_AQ_PHY_FLAG_AN_ON 0x02
1591#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 1593#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
1592 __le16 eee_capability; 1594 __le16 eee_capability;
1593#define I40E_AQ_EEE_100BASE_TX 0x0002 1595#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1698,7 @@ struct i40e_aqc_get_link_status {
1696#define I40E_AQ_LINK_TX_ACTIVE 0x00 1698#define I40E_AQ_LINK_TX_ACTIVE 0x00
1697#define I40E_AQ_LINK_TX_DRAINED 0x01 1699#define I40E_AQ_LINK_TX_DRAINED 0x01
1698#define I40E_AQ_LINK_TX_FLUSHED 0x03 1700#define I40E_AQ_LINK_TX_FLUSHED 0x03
1701#define I40E_AQ_LINK_FORCED_40G 0x10
1699 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ 1702 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
1700 __le16 max_frame_size; 1703 __le16 max_frame_size;
1701 u8 config; 1704 u8 config;
@@ -1747,14 +1750,21 @@ struct i40e_aqc_set_lb_mode {
1747 1750
1748I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); 1751I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
1749 1752
1750/* Set PHY Reset command (0x0622) */ 1753/* Set PHY Debug command (0x0622) */
1751struct i40e_aqc_set_phy_reset { 1754struct i40e_aqc_set_phy_debug {
1752 u8 reset_flags; 1755 u8 command_flags;
1753#define I40E_AQ_PHY_RESET_REQUEST 0x02 1756#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
1757#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
1758#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
1759 I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
1760#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
1761#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
1762#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
1763#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
1754 u8 reserved[15]; 1764 u8 reserved[15];
1755}; 1765};
1756 1766
1757I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset); 1767I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
1758 1768
1759enum i40e_aq_phy_reg_type { 1769enum i40e_aq_phy_reg_type {
1760 I40E_AQC_PHY_REG_INTERNAL = 0x1, 1770 I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1789,47 @@ struct i40e_aqc_nvm_update {
1779 1789
1780I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); 1790I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1781 1791
1792/* NVM Config Read (indirect 0x0704) */
1793struct i40e_aqc_nvm_config_read {
1794 __le16 cmd_flags;
1795#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1796#define ANVM_READ_SINGLE_FEATURE 0
1797#define ANVM_READ_MULTIPLE_FEATURES 1
1798 __le16 element_count;
1799 __le16 element_id; /* Feature/field ID */
1800 u8 reserved[2];
1801 __le32 address_high;
1802 __le32 address_low;
1803};
1804
1805I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
1806
1807/* NVM Config Write (indirect 0x0705) */
1808struct i40e_aqc_nvm_config_write {
1809 __le16 cmd_flags;
1810 __le16 element_count;
1811 u8 reserved[4];
1812 __le32 address_high;
1813 __le32 address_low;
1814};
1815
1816I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1817
1818struct i40e_aqc_nvm_config_data_feature {
1819 __le16 feature_id;
1820 __le16 instance_id;
1821 __le16 feature_options;
1822 __le16 feature_selection;
1823};
1824
1825struct i40e_aqc_nvm_config_data_immediate_field {
1826#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
1827 __le16 field_id;
1828 __le16 instance_id;
1829 __le16 field_options;
1830 __le16 field_value;
1831};
1832
1782/* Send to PF command (indirect 0x0801) id is only used by PF 1833/* Send to PF command (indirect 0x0801) id is only used by PF
1783 * Send to VF command (indirect 0x0802) id is only used by PF 1834 * Send to VF command (indirect 0x0802) id is only used by PF
1784 * Send to Peer PF command (indirect 0x0803) 1835 * Send to Peer PF command (indirect 0x0803)
@@ -1948,19 +1999,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
1948/* Add Udp Tunnel command and completion (direct 0x0B00) */ 1999/* Add Udp Tunnel command and completion (direct 0x0B00) */
1949struct i40e_aqc_add_udp_tunnel { 2000struct i40e_aqc_add_udp_tunnel {
1950 __le16 udp_port; 2001 __le16 udp_port;
1951 u8 header_len; /* in DWords, 1 to 15 */ 2002 u8 reserved0[3];
1952 u8 protocol_type; 2003 u8 protocol_type;
1953#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0 2004#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
1954#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2 2005#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
1955#define I40E_AQC_TUNNEL_TYPE_NGE 0x3 2006#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
1956 u8 variable_udp_length; 2007 u8 reserved1[10];
1957#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
1958#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
1959 u8 udp_key_index;
1960#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
1961#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
1962#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
1963 u8 reserved[10];
1964}; 2008};
1965 2009
1966I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); 2010I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
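The add_udp_tunnel rework above drops the header-length, variable-UDP-length and key-index knobs: firmware now derives everything from the UDP port plus a tunnel type, and the type values themselves changed (VXLAN moved from 0x2 to 0x00). A sketch of filling the new 16-byte layout with a locally mirrored struct; the admin-queue send step is elided because that helper is outside this diff:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local mirror of the reworked i40e_aqc_add_udp_tunnel layout. */
struct aqc_add_udp_tunnel {
	uint16_t udp_port;	/* little-endian on the wire */
	uint8_t  reserved0[3];
	uint8_t  protocol_type;
#define TUNNEL_TYPE_VXLAN  0x00
#define TUNNEL_TYPE_NGE    0x01
#define TUNNEL_TYPE_TEREDO 0x10
	uint8_t  reserved1[10];
};

/* Direct AQ commands carry their parameters in 16 bytes, which is what
 * I40E_CHECK_CMD_LENGTH() asserts for this struct in the header.
 */
_Static_assert(sizeof(struct aqc_add_udp_tunnel) == 16, "direct cmd size");

static void fill_vxlan_add(struct aqc_add_udp_tunnel *cmd, uint16_t port_le)
{
	memset(cmd, 0, sizeof(*cmd));	/* reserved bytes must stay zero */
	cmd->udp_port = port_le;	/* caller does the cpu_to_le16() */
	cmd->protocol_type = TUNNEL_TYPE_VXLAN;
	/* ... the descriptor is then handed to the AQ send path ... */
}

int main(void)
{
	struct aqc_add_udp_tunnel cmd;

	fill_vxlan_add(&cmd, 4789);	/* IANA VXLAN port; LE host assumed */
	printf("type: %#x\n", cmd.protocol_type);
	return 0;
}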
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index d8654fb9e525..8e6a6dd9212b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index ae084378faab..a43155afdbe2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -40,12 +43,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
40 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { 43 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
41 switch (hw->device_id) { 44 switch (hw->device_id) {
42 case I40E_DEV_ID_SFP_XL710: 45 case I40E_DEV_ID_SFP_XL710:
43 case I40E_DEV_ID_SFP_X710:
44 case I40E_DEV_ID_QEMU: 46 case I40E_DEV_ID_QEMU:
45 case I40E_DEV_ID_KX_A: 47 case I40E_DEV_ID_KX_A:
46 case I40E_DEV_ID_KX_B: 48 case I40E_DEV_ID_KX_B:
47 case I40E_DEV_ID_KX_C: 49 case I40E_DEV_ID_KX_C:
48 case I40E_DEV_ID_KX_D:
49 case I40E_DEV_ID_QSFP_A: 50 case I40E_DEV_ID_QSFP_A:
50 case I40E_DEV_ID_QSFP_B: 51 case I40E_DEV_ID_QSFP_B:
51 case I40E_DEV_ID_QSFP_C: 52 case I40E_DEV_ID_QSFP_C:
@@ -130,7 +131,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
130 **/ 131 **/
131bool i40evf_check_asq_alive(struct i40e_hw *hw) 132bool i40evf_check_asq_alive(struct i40e_hw *hw)
132{ 133{
133 return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); 134 if (hw->aq.asq.len)
135 return !!(rd32(hw, hw->aq.asq.len) &
136 I40E_PF_ATQLEN_ATQENABLE_MASK);
137 else
138 return false;
134} 139}
135 140
136/** 141/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index cb97b3eed440..a2ad9a4e399d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -160,11 +163,6 @@ struct i40e_hmc_info {
160 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ 163 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
161 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) 164 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
162 165
163#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
164 wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
165 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
166 ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
167
168/** 166/**
169 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit 167 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
170 * @hmc_info: pointer to the HMC configuration information structure 168 * @hmc_info: pointer to the HMC configuration information structure
@@ -223,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
223 u32 pd_index); 221 u32 pd_index);
224i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, 222i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
225 struct i40e_hmc_info *hmc_info, 223 struct i40e_hmc_info *hmc_info,
226 u32 idx, bool is_pf); 224 u32 idx);
227i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, 225i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
228 u32 idx); 226 u32 idx);
229i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, 227i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca26d0b..d6f762241537 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -53,6 +56,7 @@ struct i40e_hmc_obj_rxq {
53 u8 tphdata_ena; 56 u8 tphdata_ena;
54 u8 tphhead_ena; 57 u8 tphhead_ena;
55 u8 lrxqthresh; 58 u8 lrxqthresh;
59 u8 prefena; /* NOTE: normally must be set to 1 at init */
56}; 60};
57 61
58/* Tx queue context data */ 62/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 622f373b745d..21a91b14bf81 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 97ab8c2b76f8..849edcc2e398 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 30af953cf106..369839655818 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -1337,8 +1340,6 @@
1337#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT) 1340#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
1338#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 1341#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
1339#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT) 1342#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
1340#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
1341#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
1342#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 1343#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
1343#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) 1344#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
1344#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 1345#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1364,8 +1365,6 @@
1364#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT) 1365#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
1365#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 1366#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
1366#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) 1367#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
1367#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
1368#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
1369#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 1368#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
1370#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) 1369#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
1371#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 1370#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1586,6 +1585,14 @@
1586#define I40E_GLLAN_TSOMSK_M 0x000442DC 1585#define I40E_GLLAN_TSOMSK_M 0x000442DC
1587#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 1586#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
1588#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) 1587#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
1588#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
1589#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
1590#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
1591#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
1592#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
1593#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
1594#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
1595
1589#define I40E_PFLAN_QALLOC 0x001C0400 1596#define I40E_PFLAN_QALLOC 0x001C0400
1590#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 1597#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
1591#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) 1598#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
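The new I40E_GLLAN_TXPRE_QDIS registers give software a pre-disable handshake for transmit queues: bits 0-10 carry a queue index, and bits 30/31 request that the disable state be set or cleared. A minimal sketch of how a driver could drive them using the masks above; the 128-queues-per-register split and the helper name are assumptions for illustration (rd32/wr32 are the driver's usual register accessors):

static void i40e_pre_tx_queue_cfg_sketch(struct i40e_hw *hw, u32 queue,
					 bool enable)
{
	u32 reg_block = queue / 128;	/* assumed split across i=0..11 */
	u32 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));

	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (queue % 128) << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT;
	/* SET_QDIS pre-disables the queue, CLEAR_QDIS re-arms it */
	reg_val |= enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK
			  : I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}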
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7c08cc2e339b..7fa7a41915c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b9f50f40abe1..48ebb6cd69f2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -725,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
725 u32 rx_error, 728 u32 rx_error,
726 u16 rx_ptype) 729 u16 rx_ptype)
727{ 730{
731 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
732 bool ipv4 = false, ipv6 = false;
728 bool ipv4_tunnel, ipv6_tunnel; 733 bool ipv4_tunnel, ipv6_tunnel;
729 __wsum rx_udp_csum; 734 __wsum rx_udp_csum;
730 __sum16 csum;
731 struct iphdr *iph; 735 struct iphdr *iph;
736 __sum16 csum;
732 737
733 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && 738 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
734 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); 739 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -739,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
739 skb->ip_summed = CHECKSUM_NONE; 744 skb->ip_summed = CHECKSUM_NONE;
740 745
741 /* Rx csum enabled and ip headers found? */ 746 /* Rx csum enabled and ip headers found? */
742 if (!(vsi->netdev->features & NETIF_F_RXCSUM && 747 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
743 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
744 return; 748 return;
745 749
750 /* did the hardware decode the packet and checksum? */
751 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
752 return;
753
754 /* both known and outer_ip must be set for the below code to work */
755 if (!(decoded.known && decoded.outer_ip))
756 return;
757
758 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
759 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
760 ipv4 = true;
761 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
762 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
763 ipv6 = true;
764
765 if (ipv4 &&
766 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
767 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
768 goto checksum_fail;
769
746 /* likely incorrect csum if alternate IP extension headers found */ 770 /* likely incorrect csum if alternate IP extension headers found */
747 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 771 if (ipv6 &&
772 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
773 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
774 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
775 /* don't increment checksum err here, non-fatal err */
748 return; 776 return;
749 777
750 /* IP or L4 or outmost IP checksum error */ 778 /* there was some L4 error, count error and punt packet to the stack */
751 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | 779 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
752 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | 780 goto checksum_fail;
753 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { 781
754 vsi->back->hw_csum_rx_error++; 782 /* handle packets that were not able to be checksummed due
783 * to arrival speed, in this case the stack can compute
784 * the csum.
785 */
786 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
755 return; 787 return;
756 }
757 788
789 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
790 * it in the driver, hardware does not do it for us.
791 * Since L3L4P bit was set we assume a valid IHL value (>=5)
792 * so the total length of IPv4 header is IHL*4 bytes
 793 * The UDP_0 bit *may* be set if the *inner* header is UDP
794 */
758 if (ipv4_tunnel && 795 if (ipv4_tunnel &&
796 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
759 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 797 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
760 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
761 * it in the driver, hardware does not do it for us.
762 * Since L3L4P bit was set we assume a valid IHL value (>=5)
763 * so the total length of IPv4 header is IHL*4 bytes
764 */
765 skb->transport_header = skb->mac_header + 798 skb->transport_header = skb->mac_header +
766 sizeof(struct ethhdr) + 799 sizeof(struct ethhdr) +
767 (ip_hdr(skb)->ihl * 4); 800 (ip_hdr(skb)->ihl * 4);
@@ -778,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
778 (skb->len - skb_transport_offset(skb)), 811 (skb->len - skb_transport_offset(skb)),
779 IPPROTO_UDP, rx_udp_csum); 812 IPPROTO_UDP, rx_udp_csum);
780 813
781 if (udp_hdr(skb)->check != csum) { 814 if (udp_hdr(skb)->check != csum)
782 vsi->back->hw_csum_rx_error++; 815 goto checksum_fail;
783 return;
784 }
785 } 816 }
786 817
787 skb->ip_summed = CHECKSUM_UNNECESSARY; 818 skb->ip_summed = CHECKSUM_UNNECESSARY;
819
820 return;
821
822checksum_fail:
823 vsi->back->hw_csum_rx_error++;
788} 824}
789 825
790/** 826/**
@@ -953,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
953 /* ERR_MASK will only have valid bits if EOP set */ 989 /* ERR_MASK will only have valid bits if EOP set */
954 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { 990 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
955 dev_kfree_skb_any(skb); 991 dev_kfree_skb_any(skb);
992 /* TODO: shouldn't we increment a counter indicating the
993 * drop?
994 */
956 goto next_desc; 995 goto next_desc;
957 } 996 }
958 997
@@ -1508,9 +1547,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
1508static int i40e_xmit_descriptor_count(struct sk_buff *skb, 1547static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1509 struct i40e_ring *tx_ring) 1548 struct i40e_ring *tx_ring)
1510{ 1549{
1511#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1512 unsigned int f; 1550 unsigned int f;
1513#endif
1514 int count = 0; 1551 int count = 0;
1515 1552
1516 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 1553 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -1519,12 +1556,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1519 * + 1 desc for context descriptor, 1556 * + 1 desc for context descriptor,
1520 * otherwise try next time 1557 * otherwise try next time
1521 */ 1558 */
1522#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
1523 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 1559 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1524 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 1560 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
1525#else 1561
1526 count += skb_shinfo(skb)->nr_frags;
1527#endif
1528 count += TXD_USE_COUNT(skb_headlen(skb)); 1562 count += TXD_USE_COUNT(skb_headlen(skb));
1529 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { 1563 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1530 tx_ring->tx_stats.tx_busy++; 1564 tx_ring->tx_stats.tx_busy++;
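Dropping the PAGE_SIZE conditional works because the per-frag TXD_USE_COUNT() loop handles any frag size, including frags larger than one page, so the old PAGE_SIZE special case is no longer a safe shortcut once I40E_MAX_DATA_PER_TXD shrinks to 8192 (see the i40e_txrx.h hunk below). As a worked check of the stop threshold with illustrative numbers: a TSO skb with a short linear area and 17 four-KiB frags needs 17*1 + 1 = 18 data descriptors, and i40e_maybe_stop_tx() is then asked for 18 + 4 + 1 = 23 free slots, the extra 4 + 1 being the headroom the code reserves on top of the data descriptors.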
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 10bf49e18d7f..30d248bc5d19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -24,7 +27,7 @@
24#ifndef _I40E_TXRX_H_ 27#ifndef _I40E_TXRX_H_
25#define _I40E_TXRX_H_ 28#define _I40E_TXRX_H_
26 29
27/* Interrupt Throttling and Rate Limiting (storm control) Goodies */ 30/* Interrupt Throttling and Rate Limiting Goodies */
28 31
29#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ 32#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
30#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ 33#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -66,16 +69,11 @@ enum i40e_dyn_idx_t {
66 69
67/* Supported RSS offloads */ 70/* Supported RSS offloads */
68#define I40E_DEFAULT_RSS_HENA ( \ 71#define I40E_DEFAULT_RSS_HENA ( \
69 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
70 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
71 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ 72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
72 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ 73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
73 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ 74 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ 75 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
76 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ 76 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
79 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ 77 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
80 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ 78 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
81 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ 79 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
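The default HENA set drops the now-reserved unicast/multicast UDP PCTYPEs and the IPv4 TCP SYN type (the i40e_type.h hunk further down marks values 29-30, 32, and 39-40 reserved). The whole set is a single u64 bitmap that the VF programs as two 32-bit registers, exactly as the ethtool RSS code later in this series does:

	u64 hena = I40E_DEFAULT_RSS_HENA;

	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);		/* low 32 bits */
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));	/* high 32 bits */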
@@ -119,11 +117,11 @@ enum i40e_dyn_idx_t {
119#define i40e_rx_desc i40e_32byte_rx_desc 117#define i40e_rx_desc i40e_32byte_rx_desc
120 118
121#define I40E_MIN_TX_LEN 17 119#define I40E_MIN_TX_LEN 17
122#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */ 120#define I40E_MAX_DATA_PER_TXD 8192
123 121
124/* Tx Descriptors needed, worst case */ 122/* Tx Descriptors needed, worst case */
125#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) 123#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
126#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) 124#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
127 125
128#define I40E_TX_FLAGS_CSUM (u32)(1) 126#define I40E_TX_FLAGS_CSUM (u32)(1)
129#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) 127#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
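With the per-descriptor data cap halved to 8 KiB, the worst-case reservation simplifies: TXD_USE_COUNT(PAGE_SIZE) is 1 on 4 KiB-page systems, so DESC_NEEDED becomes MAX_SKB_FRAGS + 4 (21 with the typical MAX_SKB_FRAGS of 17). A tiny standalone check of the arithmetic:

#include <stdio.h>

#define I40E_MAX_DATA_PER_TXD 8192
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

int main(void)
{
	printf("%d\n", TXD_USE_COUNT(4096));	/* -> 1 descriptor */
	printf("%d\n", TXD_USE_COUNT(65536));	/* -> 8 on 64 KiB pages */
	return 0;
}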
@@ -180,7 +178,6 @@ enum i40e_ring_state_t {
180 __I40E_TX_DETECT_HANG, 178 __I40E_TX_DETECT_HANG,
181 __I40E_HANG_CHECK_ARMED, 179 __I40E_HANG_CHECK_ARMED,
182 __I40E_RX_PS_ENABLED, 180 __I40E_RX_PS_ENABLED,
183 __I40E_RX_LRO_ENABLED,
184 __I40E_RX_16BYTE_DESC_ENABLED, 181 __I40E_RX_16BYTE_DESC_ENABLED,
185}; 182};
186 183
@@ -196,12 +193,6 @@ enum i40e_ring_state_t {
196 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 193 set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
197#define clear_check_for_tx_hang(ring) \ 194#define clear_check_for_tx_hang(ring) \
198 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) 195 clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
199#define ring_is_lro_enabled(ring) \
200 test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
201#define set_ring_lro_enabled(ring) \
202 set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
203#define clear_ring_lro_enabled(ring) \
204 clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
205#define ring_is_16byte_desc_enabled(ring) \ 196#define ring_is_16byte_desc_enabled(ring) \
206 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) 197 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
207#define set_ring_16byte_desc_enabled(ring) \ 198#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b3381edd..d3cf5a69de54 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -32,13 +35,11 @@
32#include "i40e_lan_hmc.h" 35#include "i40e_lan_hmc.h"
33 36
34/* Device IDs */ 37/* Device IDs */
35#define I40E_DEV_ID_SFP_XL710 0x1572 38#define I40E_DEV_ID_SFP_XL710 0x1572
36#define I40E_DEV_ID_SFP_X710 0x1573
37#define I40E_DEV_ID_QEMU 0x1574 39#define I40E_DEV_ID_QEMU 0x1574
38#define I40E_DEV_ID_KX_A 0x157F 40#define I40E_DEV_ID_KX_A 0x157F
39#define I40E_DEV_ID_KX_B 0x1580 41#define I40E_DEV_ID_KX_B 0x1580
40#define I40E_DEV_ID_KX_C 0x1581 42#define I40E_DEV_ID_KX_C 0x1581
41#define I40E_DEV_ID_KX_D 0x1582
42#define I40E_DEV_ID_QSFP_A 0x1583 43#define I40E_DEV_ID_QSFP_A 0x1583
43#define I40E_DEV_ID_QSFP_B 0x1584 44#define I40E_DEV_ID_QSFP_B 0x1584
44#define I40E_DEV_ID_QSFP_C 0x1585 45#define I40E_DEV_ID_QSFP_C 0x1585
@@ -57,8 +58,8 @@
57/* Max default timeout in ms, */ 58/* Max default timeout in ms, */
58#define I40E_MAX_NVM_TIMEOUT 18000 59#define I40E_MAX_NVM_TIMEOUT 18000
59 60
60/* Switch from mc to the 2usec global time (this is the GTIME resolution) */ 61/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
61#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2) 62#define I40E_MS_TO_GTIME(time) ((time) * 1000)
62 63
63/* forward declaration */ 64/* forward declaration */
64struct i40e_hw; 65struct i40e_hw;
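The comment fix carries a real arithmetic change: GTIME ticks at 1 us, so the conversion no longer halves the value. Worked out for the 18000 ms NVM timeout above:

	new: I40E_MS_TO_GTIME(18000) = 18000 * 1000     = 18,000,000 ticks
	old: (18000 * 1000) / 2                         =  9,000,000 ticks

i.e. at 1 us resolution the old macro waited only half the intended time.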
@@ -101,15 +102,6 @@ enum i40e_debug_mask {
101 I40E_DEBUG_ALL = 0xFFFFFFFF 102 I40E_DEBUG_ALL = 0xFFFFFFFF
102}; 103};
103 104
104/* PCI Bus Info */
105#define I40E_PCI_LINK_WIDTH_1 0x10
106#define I40E_PCI_LINK_WIDTH_2 0x20
107#define I40E_PCI_LINK_WIDTH_4 0x40
108#define I40E_PCI_LINK_WIDTH_8 0x80
109#define I40E_PCI_LINK_SPEED_2500 0x1
110#define I40E_PCI_LINK_SPEED_5000 0x2
111#define I40E_PCI_LINK_SPEED_8000 0x3
112
113/* These are structs for managing the hardware information and the operations. 105/* These are structs for managing the hardware information and the operations.
114 * The structures of function pointers are filled out at init time when we 106 * The structures of function pointers are filled out at init time when we
115 * know for sure exactly which hardware we're working with. This gives us the 107 * know for sure exactly which hardware we're working with. This gives us the
@@ -173,6 +165,9 @@ struct i40e_link_status {
173 u8 loopback; 165 u8 loopback;
174 /* is Link Status Event notification to SW enabled */ 166 /* is Link Status Event notification to SW enabled */
175 bool lse_enable; 167 bool lse_enable;
168 u16 max_frame_size;
169 bool crc_enable;
170 u8 pacing;
176}; 171};
177 172
178struct i40e_phy_info { 173struct i40e_phy_info {
@@ -415,6 +410,7 @@ struct i40e_driver_version {
415 u8 minor_version; 410 u8 minor_version;
416 u8 build_version; 411 u8 build_version;
417 u8 subbuild_version; 412 u8 subbuild_version;
413 u8 driver_string[32];
418}; 414};
419 415
420/* RX Descriptors */ 416/* RX Descriptors */
@@ -494,9 +490,6 @@ union i40e_32byte_rx_desc {
494 } wb; /* writeback */ 490 } wb; /* writeback */
495}; 491};
496 492
497#define I40E_RXD_QW1_STATUS_SHIFT 0
498#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
499
500enum i40e_rx_desc_status_bits { 493enum i40e_rx_desc_status_bits {
501 /* Note: These are predefined bit offsets */ 494 /* Note: These are predefined bit offsets */
502 I40E_RX_DESC_STATUS_DD_SHIFT = 0, 495 I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -513,9 +506,14 @@ enum i40e_rx_desc_status_bits {
513 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, 506 I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
514 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, 507 I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
515 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ 508 I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
516 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18 509 I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
510 I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
517}; 511};
518 512
513#define I40E_RXD_QW1_STATUS_SHIFT 0
514#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
515 << I40E_RXD_QW1_STATUS_SHIFT)
516
519#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT 517#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
520#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ 518#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
521 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) 519 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
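Deriving the QW1 status mask from the sentinel keeps it in sync with the enum: I40E_RX_DESC_STATUS_LAST takes the value 19 (one past UDP_0's shift of 18), giving a mask of 0x7FFFF, whereas the old literal 0x7FFF covered only bits 0-14 and silently truncated IPV6EXADD (bit 15) and UDP_0 (bit 18). A one-line check:

#include <stdio.h>

enum { UDP_0_SHIFT = 18, STATUS_LAST };	/* STATUS_LAST == 19 */

int main(void)
{
	printf("0x%lX\n", (1UL << STATUS_LAST) - 1);	/* -> 0x7FFFF */
	return 0;
}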
@@ -543,7 +541,8 @@ enum i40e_rx_desc_error_bits {
543 I40E_RX_DESC_ERROR_IPE_SHIFT = 3, 541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
544 I40E_RX_DESC_ERROR_L4E_SHIFT = 4, 542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
545 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, 543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
546 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6 544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
545 I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
547}; 546};
548 547
549enum i40e_rx_desc_error_l3l4e_fcoe_masks { 548enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -664,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
664 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, 663 I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
665 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ 664 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
666 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ 665 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
667 I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
668 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, 666 I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
669 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, 667 I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
670 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, 668 I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -868,18 +866,14 @@ struct i40e_filter_program_desc {
868 866
869/* Packet Classifier Types for filters */ 867/* Packet Classifier Types for filters */
870enum i40e_filter_pctype { 868enum i40e_filter_pctype {
871 /* Note: Values 0-28 are reserved for future use */ 869 /* Note: Values 0-30 are reserved for future use */
872 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
873 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
874 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, 870 I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
875 I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32, 871 /* Note: Value 32 is reserved for future use */
876 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, 872 I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
877 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, 873 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
878 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, 874 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
879 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, 875 I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
880 /* Note: Values 37-38 are reserved for future use */ 876 /* Note: Values 37-40 are reserved for future use */
881 I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
882 I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
883 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, 877 I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
884 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42, 878 I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
885 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, 879 I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -961,6 +955,16 @@ struct i40e_vsi_context {
961 struct i40e_aqc_vsi_properties_data info; 955 struct i40e_aqc_vsi_properties_data info;
962}; 956};
963 957
958struct i40e_veb_context {
959 u16 seid;
960 u16 uplink_seid;
961 u16 veb_number;
962 u16 vebs_allocated;
963 u16 vebs_unallocated;
964 u16 flags;
965 struct i40e_aqc_get_veb_parameters_completion info;
966};
967
964/* Statistics collected by each port, VSI, VEB, and S-channel */ 968/* Statistics collected by each port, VSI, VEB, and S-channel */
965struct i40e_eth_stats { 969struct i40e_eth_stats {
966 u64 rx_bytes; /* gorc */ 970 u64 rx_bytes; /* gorc */
@@ -968,8 +972,6 @@ struct i40e_eth_stats {
968 u64 rx_multicast; /* mprc */ 972 u64 rx_multicast; /* mprc */
969 u64 rx_broadcast; /* bprc */ 973 u64 rx_broadcast; /* bprc */
970 u64 rx_discards; /* rdpc */ 974 u64 rx_discards; /* rdpc */
971 u64 rx_errors; /* repc */
972 u64 rx_missed; /* rmpc */
973 u64 rx_unknown_protocol; /* rupp */ 975 u64 rx_unknown_protocol; /* rupp */
974 u64 tx_bytes; /* gotc */ 976 u64 tx_bytes; /* gotc */
975 u64 tx_unicast; /* uptc */ 977 u64 tx_unicast; /* uptc */
@@ -1021,9 +1023,12 @@ struct i40e_hw_port_stats {
1021 u64 tx_size_big; /* ptc9522 */ 1023 u64 tx_size_big; /* ptc9522 */
1022 u64 mac_short_packet_dropped; /* mspdc */ 1024 u64 mac_short_packet_dropped; /* mspdc */
1023 u64 checksum_error; /* xec */ 1025 u64 checksum_error; /* xec */
1026 /* flow director stats */
1027 u64 fd_atr_match;
1028 u64 fd_sb_match;
1024 /* EEE LPI */ 1029 /* EEE LPI */
1025 bool tx_lpi_status; 1030 u32 tx_lpi_status;
1026 bool rx_lpi_status; 1031 u32 rx_lpi_status;
1027 u64 tx_lpi_count; /* etlpic */ 1032 u64 tx_lpi_count; /* etlpic */
1028 u64 rx_lpi_count; /* erlpic */ 1033 u64 rx_lpi_count; /* erlpic */
1029}; 1034};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index ccf45d04b7ef..cd18d5689006 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -338,10 +341,6 @@ struct i40e_virtchnl_pf_event {
338 int severity; 341 int severity;
339}; 342};
340 343
341/* The following are TBD, not necessary for LAN functionality.
342 * I40E_VIRTCHNL_OP_FCOE
343 */
344
345/* VF reset states - these are written into the RSTAT register: 344/* VF reset states - these are written into the RSTAT register:
346 * I40E_VFGEN_RSTAT1 on the PF 345 * I40E_VFGEN_RSTAT1 on the PF
347 * I40E_VFGEN_RSTAT on the VF 346 * I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 807807d62387..30ef519d4b91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -77,7 +80,7 @@ struct i40e_vsi {
77#define I40EVF_MIN_TXD 64 80#define I40EVF_MIN_TXD 64
78#define I40EVF_MAX_RXD 4096 81#define I40EVF_MAX_RXD 4096
79#define I40EVF_MIN_RXD 64 82#define I40EVF_MIN_RXD 64
80#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8 83#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
81 84
82/* Supported Rx Buffer Sizes */ 85/* Supported Rx Buffer Sizes */
83#define I40EVF_RXBUFFER_64 64 /* Used for packet split */ 86#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
@@ -193,10 +196,12 @@ struct i40evf_adapter {
193 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; 196 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
194 u32 tx_timeout_count; 197 u32 tx_timeout_count;
195 struct list_head mac_filter_list; 198 struct list_head mac_filter_list;
199 u32 tx_desc_count;
196 200
197 /* RX */ 201 /* RX */
198 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; 202 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
199 u64 hw_csum_rx_error; 203 u64 hw_csum_rx_error;
204 u32 rx_desc_count;
200 int num_msix_vectors; 205 int num_msix_vectors;
201 struct msix_entry *msix_entries; 206 struct msix_entry *msix_entries;
202 207
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1ce179c..60407a9df0c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -44,8 +47,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
44 I40EVF_STAT("rx_multicast", current_stats.rx_multicast), 47 I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
45 I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), 48 I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
46 I40EVF_STAT("rx_discards", current_stats.rx_discards), 49 I40EVF_STAT("rx_discards", current_stats.rx_discards),
47 I40EVF_STAT("rx_errors", current_stats.rx_errors),
48 I40EVF_STAT("rx_missed", current_stats.rx_missed),
49 I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), 50 I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
50 I40EVF_STAT("tx_bytes", current_stats.tx_bytes), 51 I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
51 I40EVF_STAT("tx_unicast", current_stats.tx_unicast), 52 I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
@@ -56,10 +57,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
56}; 57};
57 58
58#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) 59#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
59#define I40EVF_QUEUE_STATS_LEN \ 60#define I40EVF_QUEUE_STATS_LEN(_dev) \
60 (((struct i40evf_adapter *) \ 61 (((struct i40evf_adapter *) \
61 netdev_priv(netdev))->vsi_res->num_queue_pairs * 4) 62 netdev_priv(_dev))->vsi_res->num_queue_pairs \
62#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN) 63 * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
64#define I40EVF_STATS_LEN(_dev) \
65 (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
63 66
64/** 67/**
65 * i40evf_get_settings - Get Link Speed and Duplex settings 68 * i40evf_get_settings - Get Link Speed and Duplex settings
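Parameterizing these macros on the net_device fixes two baked-in assumptions of the old version: that the variable is literally named netdev at every call site, and that each queue contributes exactly four u64s. The new form derives the count from the stats struct itself. Worked out for a 4-queue-pair VF, assuming i40e_queue_stats carries the usual two u64 counters (packets and bytes):

#include <stdio.h>
#include <stdint.h>

struct i40e_queue_stats {	/* assumed layout: two u64 counters */
	uint64_t packets;
	uint64_t bytes;
};

int main(void)
{
	unsigned int nqp = 4;	/* example num_queue_pairs */
	unsigned long n = nqp * 2 *
		(sizeof(struct i40e_queue_stats) / sizeof(uint64_t));
	printf("%lu\n", n);	/* -> 16: Tx+Rx rings, two stats each */
	return 0;
}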
@@ -75,7 +78,7 @@ static int i40evf_get_settings(struct net_device *netdev,
75 /* In the future the VF will be able to query the PF for 78 /* In the future the VF will be able to query the PF for
76 * some information - for now use a dummy value 79 * some information - for now use a dummy value
77 */ 80 */
78 ecmd->supported = SUPPORTED_10000baseT_Full; 81 ecmd->supported = 0;
79 ecmd->autoneg = AUTONEG_DISABLE; 82 ecmd->autoneg = AUTONEG_DISABLE;
80 ecmd->transceiver = XCVR_DUMMY1; 83 ecmd->transceiver = XCVR_DUMMY1;
81 ecmd->port = PORT_NONE; 84 ecmd->port = PORT_NONE;
@@ -94,9 +97,9 @@ static int i40evf_get_settings(struct net_device *netdev,
94static int i40evf_get_sset_count(struct net_device *netdev, int sset) 97static int i40evf_get_sset_count(struct net_device *netdev, int sset)
95{ 98{
96 if (sset == ETH_SS_STATS) 99 if (sset == ETH_SS_STATS)
97 return I40EVF_STATS_LEN; 100 return I40EVF_STATS_LEN(netdev);
98 else 101 else
99 return -ENOTSUPP; 102 return -EINVAL;
100} 103}
101 104
102/** 105/**
@@ -219,13 +222,11 @@ static void i40evf_get_ringparam(struct net_device *netdev,
219 struct ethtool_ringparam *ring) 222 struct ethtool_ringparam *ring)
220{ 223{
221 struct i40evf_adapter *adapter = netdev_priv(netdev); 224 struct i40evf_adapter *adapter = netdev_priv(netdev);
222 struct i40e_ring *tx_ring = adapter->tx_rings[0];
223 struct i40e_ring *rx_ring = adapter->rx_rings[0];
224 225
225 ring->rx_max_pending = I40EVF_MAX_RXD; 226 ring->rx_max_pending = I40EVF_MAX_RXD;
226 ring->tx_max_pending = I40EVF_MAX_TXD; 227 ring->tx_max_pending = I40EVF_MAX_TXD;
227 ring->rx_pending = rx_ring->count; 228 ring->rx_pending = adapter->rx_desc_count;
228 ring->tx_pending = tx_ring->count; 229 ring->tx_pending = adapter->tx_desc_count;
229} 230}
230 231
231/** 232/**
@@ -241,7 +242,6 @@ static int i40evf_set_ringparam(struct net_device *netdev,
241{ 242{
242 struct i40evf_adapter *adapter = netdev_priv(netdev); 243 struct i40evf_adapter *adapter = netdev_priv(netdev);
243 u32 new_rx_count, new_tx_count; 244 u32 new_rx_count, new_tx_count;
244 int i;
245 245
246 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 246 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
247 return -EINVAL; 247 return -EINVAL;
@@ -257,17 +257,16 @@ static int i40evf_set_ringparam(struct net_device *netdev,
257 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); 257 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
258 258
259 /* if nothing to do return success */ 259 /* if nothing to do return success */
260 if ((new_tx_count == adapter->tx_rings[0]->count) && 260 if ((new_tx_count == adapter->tx_desc_count) &&
261 (new_rx_count == adapter->rx_rings[0]->count)) 261 (new_rx_count == adapter->rx_desc_count))
262 return 0; 262 return 0;
263 263
264 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { 264 adapter->tx_desc_count = new_tx_count;
265 adapter->tx_rings[0]->count = new_tx_count; 265 adapter->rx_desc_count = new_rx_count;
266 adapter->rx_rings[0]->count = new_rx_count;
267 }
268 266
269 if (netif_running(netdev)) 267 if (netif_running(netdev))
270 i40evf_reinit_locked(adapter); 268 i40evf_reinit_locked(adapter);
269
271 return 0; 270 return 0;
272} 271}
273 272
@@ -290,14 +289,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
290 ec->rx_max_coalesced_frames = vsi->work_limit; 289 ec->rx_max_coalesced_frames = vsi->work_limit;
291 290
292 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) 291 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
293 ec->rx_coalesce_usecs = 1; 292 ec->use_adaptive_rx_coalesce = 1;
294 else
295 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
296 293
297 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) 294 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
298 ec->tx_coalesce_usecs = 1; 295 ec->use_adaptive_tx_coalesce = 1;
299 else 296
300 ec->tx_coalesce_usecs = vsi->tx_itr_setting; 297 ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
298 ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
301 299
302 return 0; 300 return 0;
303} 301}
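get_coalesce now reports adaptive mode through the proper ethtool use_adaptive_* flags and masks I40E_ITR_DYNAMIC out of the reported usec value, instead of collapsing adaptive mode into the magic value 1. A sketch of the encoding (the flag is a high bit OR'd into the same u16 as the microsecond setting; its exact value lives in the driver's i40e_txrx.h):

	u16 setting = I40E_ITR_DYNAMIC | 62;	/* adaptive, 62 us base */

	ec->use_adaptive_rx_coalesce = !!(setting & I40E_ITR_DYNAMIC);
	ec->rx_coalesce_usecs = setting & ~I40E_ITR_DYNAMIC;	/* -> 62 */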
@@ -318,54 +316,361 @@ static int i40evf_set_coalesce(struct net_device *netdev,
318 struct i40e_q_vector *q_vector; 316 struct i40e_q_vector *q_vector;
319 int i; 317 int i;
320 318
321 if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames) 319 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
322 vsi->work_limit = ec->tx_max_coalesced_frames; 320 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
321
322 if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
323 (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
324 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
325
326 else
327 return -EINVAL;
328
329 if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
330 (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
331 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
332 else if (ec->use_adaptive_tx_coalesce)
333 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
334 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
335 else
336 return -EINVAL;
337
338 if (ec->use_adaptive_rx_coalesce)
339 vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
340 else
341 vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
342
343 if (ec->use_adaptive_tx_coalesce)
344 vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
345 else
346 vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
323 347
324 switch (ec->rx_coalesce_usecs) { 348 for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
325 case 0: 349 q_vector = adapter->q_vector[i];
326 vsi->rx_itr_setting = 0; 350 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
351 wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
352 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
353 wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
354 i40e_flush(hw);
355 }
356
357 return 0;
358}
359
360/**
361 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
362 * @adapter: board private structure
363 * @cmd: ethtool rxnfc command
364 *
365 * Returns Success if the flow is supported, else Invalid Input.
366 **/
367static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
368 struct ethtool_rxnfc *cmd)
369{
370 struct i40e_hw *hw = &adapter->hw;
371 u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
372 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
373
374 /* We always hash on IP src and dest addresses */
375 cmd->data = RXH_IP_SRC | RXH_IP_DST;
376
377 switch (cmd->flow_type) {
378 case TCP_V4_FLOW:
379 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
380 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
327 break; 381 break;
328 case 1: 382 case UDP_V4_FLOW:
329 vsi->rx_itr_setting = (I40E_ITR_DYNAMIC 383 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
330 | ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); 384 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
331 break; 385 break;
332 default: 386
333 if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || 387 case SCTP_V4_FLOW:
334 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) 388 case AH_ESP_V4_FLOW:
335 return -EINVAL; 389 case AH_V4_FLOW:
336 vsi->rx_itr_setting = ec->rx_coalesce_usecs; 390 case ESP_V4_FLOW:
391 case IPV4_FLOW:
392 break;
393
394 case TCP_V6_FLOW:
395 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
396 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
337 break; 397 break;
398 case UDP_V6_FLOW:
399 if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
400 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
401 break;
402
403 case SCTP_V6_FLOW:
404 case AH_ESP_V6_FLOW:
405 case AH_V6_FLOW:
406 case ESP_V6_FLOW:
407 case IPV6_FLOW:
408 break;
409 default:
410 cmd->data = 0;
411 return -EINVAL;
338 } 412 }
339 413
340 switch (ec->tx_coalesce_usecs) { 414 return 0;
341 case 0: 415}
342 vsi->tx_itr_setting = 0; 416
417/**
418 * i40evf_get_rxnfc - command to get RX flow classification rules
419 * @netdev: network interface device structure
420 * @cmd: ethtool rxnfc command
421 *
422 * Returns Success if the command is supported.
423 **/
424static int i40evf_get_rxnfc(struct net_device *netdev,
425 struct ethtool_rxnfc *cmd,
426 u32 *rule_locs)
427{
428 struct i40evf_adapter *adapter = netdev_priv(netdev);
429 int ret = -EOPNOTSUPP;
430
431 switch (cmd->cmd) {
432 case ETHTOOL_GRXRINGS:
433 cmd->data = adapter->vsi_res->num_queue_pairs;
434 ret = 0;
343 break; 435 break;
344 case 1: 436 case ETHTOOL_GRXFH:
345 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC 437 ret = i40evf_get_rss_hash_opts(adapter, cmd);
346 | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
347 break; 438 break;
348 default: 439 default:
349 if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || 440 break;
350 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) 441 }
442
443 return ret;
444}
445
446/**
447 * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
448 * @adapter: board private structure
449 * @cmd: ethtool rxnfc command
450 *
451 * Returns Success if the flow input set is supported.
452 **/
453static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
454 struct ethtool_rxnfc *nfc)
455{
456 struct i40e_hw *hw = &adapter->hw;
457
458 u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
459 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
460
461 /* RSS does not support anything other than hashing
462 * to queues on src and dst IPs and ports
463 */
464 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
465 RXH_L4_B_0_1 | RXH_L4_B_2_3))
466 return -EINVAL;
467
468 /* We need at least the IP SRC and DEST fields for hashing */
469 if (!(nfc->data & RXH_IP_SRC) ||
470 !(nfc->data & RXH_IP_DST))
471 return -EINVAL;
472
473 switch (nfc->flow_type) {
474 case TCP_V4_FLOW:
475 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
476 case 0:
477 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
478 break;
479 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
480 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
481 break;
482 default:
351 return -EINVAL; 483 return -EINVAL;
352 vsi->tx_itr_setting = ec->tx_coalesce_usecs; 484 }
485 break;
486 case TCP_V6_FLOW:
487 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
488 case 0:
489 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
490 break;
491 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
492 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
493 break;
494 default:
495 return -EINVAL;
496 }
497 break;
498 case UDP_V4_FLOW:
499 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
500 case 0:
501 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
502 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
503 break;
504 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
505 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
506 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
507 break;
508 default:
509 return -EINVAL;
510 }
353 break; 511 break;
512 case UDP_V6_FLOW:
513 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
514 case 0:
515 hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
516 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
517 break;
518 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
519 hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
520 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
521 break;
522 default:
523 return -EINVAL;
524 }
525 break;
526 case AH_ESP_V4_FLOW:
527 case AH_V4_FLOW:
528 case ESP_V4_FLOW:
529 case SCTP_V4_FLOW:
530 if ((nfc->data & RXH_L4_B_0_1) ||
531 (nfc->data & RXH_L4_B_2_3))
532 return -EINVAL;
533 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
534 break;
535 case AH_ESP_V6_FLOW:
536 case AH_V6_FLOW:
537 case ESP_V6_FLOW:
538 case SCTP_V6_FLOW:
539 if ((nfc->data & RXH_L4_B_0_1) ||
540 (nfc->data & RXH_L4_B_2_3))
541 return -EINVAL;
542 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
543 break;
544 case IPV4_FLOW:
545 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
546 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
547 break;
548 case IPV6_FLOW:
549 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
550 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
551 break;
552 default:
553 return -EINVAL;
354 } 554 }
355 555
356 for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) { 556 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
357 q_vector = adapter->q_vector[i]; 557 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
358 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 558 i40e_flush(hw);
359 wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr); 559
360 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 560 return 0;
361 wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr); 561}
362 i40e_flush(hw); 562
563/**
564 * i40evf_set_rxnfc - command to set RX flow classification rules
565 * @netdev: network interface device structure
566 * @cmd: ethtool rxnfc command
567 *
568 * Returns Success if the command is supported.
569 **/
570static int i40evf_set_rxnfc(struct net_device *netdev,
571 struct ethtool_rxnfc *cmd)
572{
573 struct i40evf_adapter *adapter = netdev_priv(netdev);
574 int ret = -EOPNOTSUPP;
575
576 switch (cmd->cmd) {
577 case ETHTOOL_SRXFH:
578 ret = i40evf_set_rss_hash_opt(adapter, cmd);
579 break;
580 default:
581 break;
582 }
583
584 return ret;
585}
586
587/**
588 * i40evf_get_channels: get the number of channels supported by the device
589 * @netdev: network interface device structure
590 * @ch: channel information structure
591 *
592 * For the purposes of our device, we only use combined channels, i.e. a tx/rx
593 * queue pair. Report one extra channel to match our "other" MSI-X vector.
594 **/
595static void i40evf_get_channels(struct net_device *netdev,
596 struct ethtool_channels *ch)
597{
598 struct i40evf_adapter *adapter = netdev_priv(netdev);
599
600 /* Report maximum channels */
601 ch->max_combined = adapter->vsi_res->num_queue_pairs;
602
603 ch->max_other = NONQ_VECS;
604 ch->other_count = NONQ_VECS;
605
606 ch->combined_count = adapter->vsi_res->num_queue_pairs;
607}
608
609/**
610 * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
611 * @netdev: network interface device structure
612 *
613 * Returns the table size.
614 **/
615static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
616{
617 return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
618}
619
620/**
621 * i40evf_get_rxfh - get the rx flow hash indirection table
622 * @netdev: network interface device structure
623 * @indir: indirection table
624 * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
625 *
626 * Reads the indirection table directly from the hardware. Always returns 0.
627 **/
628static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
629{
630 struct i40evf_adapter *adapter = netdev_priv(netdev);
631 struct i40e_hw *hw = &adapter->hw;
632 u32 hlut_val;
633 int i, j;
634
635 for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++) {
636 hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
637 indir[j++] = hlut_val & 0xff;
638 indir[j++] = (hlut_val >> 8) & 0xff;
639 indir[j++] = (hlut_val >> 16) & 0xff;
640 indir[j++] = (hlut_val >> 24) & 0xff;
641 }
642 return 0;
643}
644
645/**
646 * i40evf_set_rxfh - set the rx flow hash indirection table
647 * @netdev: network interface device structure
648 * @indir: indirection table
649 * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
650 *
 651 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
652 * returns 0 after programming the table.
653 **/
654static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
655 const u8 *key)
656{
657 struct i40evf_adapter *adapter = netdev_priv(netdev);
658 struct i40e_hw *hw = &adapter->hw;
659 u32 hlut_val;
660 int i, j;
661
662 for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
663 hlut_val = indir[j++];
664 hlut_val |= indir[j++] << 8;
665 hlut_val |= indir[j++] << 16;
666 hlut_val |= indir[j++] << 24;
667 wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
363 } 668 }
364 669
365 return 0; 670 return 0;
366} 671}
367 672
368static struct ethtool_ops i40evf_ethtool_ops = { 673static const struct ethtool_ops i40evf_ethtool_ops = {
369 .get_settings = i40evf_get_settings, 674 .get_settings = i40evf_get_settings,
370 .get_drvinfo = i40evf_get_drvinfo, 675 .get_drvinfo = i40evf_get_drvinfo,
371 .get_link = ethtool_op_get_link, 676 .get_link = ethtool_op_get_link,
@@ -378,6 +683,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
378 .set_msglevel = i40evf_set_msglevel, 683 .set_msglevel = i40evf_set_msglevel,
379 .get_coalesce = i40evf_get_coalesce, 684 .get_coalesce = i40evf_get_coalesce,
380 .set_coalesce = i40evf_set_coalesce, 685 .set_coalesce = i40evf_set_coalesce,
686 .get_rxnfc = i40evf_get_rxnfc,
687 .set_rxnfc = i40evf_set_rxnfc,
688 .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
689 .get_rxfh = i40evf_get_rxfh,
690 .set_rxfh = i40evf_set_rxfh,
691 .get_channels = i40evf_get_channels,
381}; 692};
382 693
383/** 694/**
@@ -389,5 +700,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
389 **/ 700 **/
390void i40evf_set_ethtool_ops(struct net_device *netdev) 701void i40evf_set_ethtool_ops(struct net_device *netdev)
391{ 702{
392 SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops); 703 netdev->ethtool_ops = &i40evf_ethtool_ops;
393} 704}
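The get/set_rxfh pair above treats each 32-bit I40E_VFQF_HLUT register as four packed 8-bit queue indices. One detail worth noting: the read loop runs while i < I40E_VFQF_HLUT_MAX_INDEX but the write loop uses MAX_INDEX + 1, so the two paths cover a different number of registers. The packing itself, as a standalone round-trip:

#include <stdio.h>

int main(void)
{
	unsigned int indir[4] = { 0, 1, 2, 3 };	/* queue ids, 4 LUT slots */
	unsigned int hlut = indir[0] | indir[1] << 8 |
			    indir[2] << 16 | indir[3] << 24;

	printf("0x%08X\n", hlut);		/* -> 0x03020100 */
	printf("%u %u %u %u\n", hlut & 0xff,	/* unpack: 0 1 2 3 */
	       (hlut >> 8) & 0xff, (hlut >> 16) & 0xff,
	       (hlut >> 24) & 0xff);
	return 0;
}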
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548fde0d..7fc5f3b5d6bf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -25,13 +28,15 @@
25#include "i40e_prototype.h" 28#include "i40e_prototype.h"
26static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); 29static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
27static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); 30static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
31static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
32static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
28static int i40evf_close(struct net_device *netdev); 33static int i40evf_close(struct net_device *netdev);
29 34
30char i40evf_driver_name[] = "i40evf"; 35char i40evf_driver_name[] = "i40evf";
31static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
32 "Intel(R) XL710 X710 Virtual Function Network Driver"; 37 "Intel(R) XL710 X710 Virtual Function Network Driver";
33 38
34#define DRV_VERSION "0.9.16" 39#define DRV_VERSION "0.9.34"
35const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
36static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
37 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -167,7 +172,6 @@ static void i40evf_tx_timeout(struct net_device *netdev)
167 struct i40evf_adapter *adapter = netdev_priv(netdev); 172 struct i40evf_adapter *adapter = netdev_priv(netdev);
168 173
169 adapter->tx_timeout_count++; 174 adapter->tx_timeout_count++;
170 dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
171 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { 175 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
172 adapter->flags |= I40EVF_FLAG_RESET_NEEDED; 176 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
173 schedule_work(&adapter->reset_task); 177 schedule_work(&adapter->reset_task);
@@ -657,12 +661,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
657 f = i40evf_find_vlan(adapter, vlan); 661 f = i40evf_find_vlan(adapter, vlan);
658 if (NULL == f) { 662 if (NULL == f) {
659 f = kzalloc(sizeof(*f), GFP_ATOMIC); 663 f = kzalloc(sizeof(*f), GFP_ATOMIC);
660 if (NULL == f) { 664 if (NULL == f)
661 dev_info(&adapter->pdev->dev,
662 "%s: no memory for new VLAN filter\n",
663 __func__);
664 return NULL; 665 return NULL;
665 } 666
666 f->vlan = vlan; 667 f->vlan = vlan;
667 668
668 INIT_LIST_HEAD(&f->list); 669 INIT_LIST_HEAD(&f->list);
@@ -688,7 +689,6 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
688 f->remove = true; 689 f->remove = true;
689 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 690 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
690 } 691 }
691 return;
692} 692}
693 693
694/** 694/**
@@ -767,14 +767,12 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
767 if (NULL == f) { 767 if (NULL == f) {
768 f = kzalloc(sizeof(*f), GFP_ATOMIC); 768 f = kzalloc(sizeof(*f), GFP_ATOMIC);
769 if (NULL == f) { 769 if (NULL == f) {
770 dev_info(&adapter->pdev->dev,
771 "%s: no memory for new filter\n", __func__);
772 clear_bit(__I40EVF_IN_CRITICAL_TASK, 770 clear_bit(__I40EVF_IN_CRITICAL_TASK,
773 &adapter->crit_section); 771 &adapter->crit_section);
774 return NULL; 772 return NULL;
775 } 773 }
776 774
777 memcpy(f->macaddr, macaddr, ETH_ALEN); 775 ether_addr_copy(f->macaddr, macaddr);
778 776
779 list_add(&f->list, &adapter->mac_filter_list); 777 list_add(&f->list, &adapter->mac_filter_list);
780 f->add = true; 778 f->add = true;
@@ -807,9 +805,8 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
807 805
808 f = i40evf_add_filter(adapter, addr->sa_data); 806 f = i40evf_add_filter(adapter, addr->sa_data);
809 if (f) { 807 if (f) {
810 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 808 ether_addr_copy(hw->mac.addr, addr->sa_data);
811 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 809 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
812 netdev->addr_len);
813 } 810 }
814 811
815 return (f == NULL) ? -ENOMEM : 0; 812 return (f == NULL) ? -ENOMEM : 0;
@@ -841,7 +838,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
841 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 838 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
842 bool found = false; 839 bool found = false;
843 840
844 if (f->macaddr[0] & 0x01) { 841 if (is_multicast_ether_addr(f->macaddr)) {
845 netdev_for_each_mc_addr(mca, netdev) { 842 netdev_for_each_mc_addr(mca, netdev) {
846 if (ether_addr_equal(mca->addr, f->macaddr)) { 843 if (ether_addr_equal(mca->addr, f->macaddr)) {
847 found = true; 844 found = true;
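The hunks above swap open-coded memcpy() calls and the `f->macaddr[0] & 0x01` test for the helpers from <linux/etherdevice.h>. Simplified sketches of what those helpers amount to (the in-tree versions also provide alignment-optimized paths, and ether_addr_copy() expects both pointers to be at least 2-byte aligned):

	/* is_multicast_ether_addr(): the I/G bit is the LSB of the first octet */
	static inline bool is_multicast_ether_addr(const u8 *addr)
	{
		return 0x01 & addr[0];
	}

	/* ether_addr_copy(): always copies exactly ETH_ALEN (6) bytes */
	static inline void ether_addr_copy(u8 *dst, const u8 *src)
	{
		memcpy(dst, src, ETH_ALEN);
	}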
@@ -970,6 +967,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
970 struct net_device *netdev = adapter->netdev; 967 struct net_device *netdev = adapter->netdev;
971 struct i40evf_mac_filter *f; 968 struct i40evf_mac_filter *f;
972 969
970 if (adapter->state == __I40EVF_DOWN)
971 return;
972
973 /* remove all MAC filters */ 973 /* remove all MAC filters */
974 list_for_each_entry(f, &adapter->mac_filter_list, list) { 974 list_for_each_entry(f, &adapter->mac_filter_list, list) {
975 f->remove = true; 975 f->remove = true;
@@ -1027,30 +1027,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
1027 * Right now, we simply care about how many we'll get; we'll 1027 * Right now, we simply care about how many we'll get; we'll
1028 * set them up later while requesting irq's. 1028 * set them up later while requesting irq's.
1029 */ 1029 */
1030 while (vectors >= vector_threshold) { 1030 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1031 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1031 vector_threshold, vectors);
1032 vectors); 1032 if (err < 0) {
1033 if (!err) /* Success in acquiring all requested vectors. */ 1033 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1034 break;
1035 else if (err < 0)
1036 vectors = 0; /* Nasty failure, quit now */
1037 else /* err == number of vectors we should try again with */
1038 vectors = err;
1039 }
1040
1041 if (vectors < vector_threshold) {
1042 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
1043 kfree(adapter->msix_entries); 1034 kfree(adapter->msix_entries);
1044 adapter->msix_entries = NULL; 1035 adapter->msix_entries = NULL;
1045 err = -EIO; 1036 return err;
1046 } else {
1047 /* Adjust for only the vectors we'll use, which is minimum
1048 * of max_msix_q_vectors + NONQ_VECS, or the number of
1049 * vectors we were allocated.
1050 */
1051 adapter->num_msix_vectors = vectors;
1052 } 1037 }
1053 return err; 1038
1039 /* Adjust for only the vectors we'll use, which is minimum
1040 * of max_msix_q_vectors + NONQ_VECS, or the number of
1041 * vectors we were allocated.
1042 */
1043 adapter->num_msix_vectors = err;
1044 return 0;
1054} 1045}
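The open-coded retry loop around pci_enable_msix() is replaced by pci_enable_msix_range(), which encapsulates the retry: it enables between minvec and maxvec vectors and returns how many it actually got, or a negative errno if even minvec could not be satisfied. A minimal sketch of that contract (illustrative, not part of the patch):

	int nvec;

	nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (nvec < 0)
		return nvec;	/* not even minvec vectors were available */
	/* success: minvec <= nvec <= maxvec vectors are now enabled */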
1055 1046
1056/** 1047/**
@@ -1096,14 +1087,14 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
1096 tx_ring->queue_index = i; 1087 tx_ring->queue_index = i;
1097 tx_ring->netdev = adapter->netdev; 1088 tx_ring->netdev = adapter->netdev;
1098 tx_ring->dev = &adapter->pdev->dev; 1089 tx_ring->dev = &adapter->pdev->dev;
1099 tx_ring->count = I40EVF_DEFAULT_TXD; 1090 tx_ring->count = adapter->tx_desc_count;
1100 adapter->tx_rings[i] = tx_ring; 1091 adapter->tx_rings[i] = tx_ring;
1101 1092
1102 rx_ring = &tx_ring[1]; 1093 rx_ring = &tx_ring[1];
1103 rx_ring->queue_index = i; 1094 rx_ring->queue_index = i;
1104 rx_ring->netdev = adapter->netdev; 1095 rx_ring->netdev = adapter->netdev;
1105 rx_ring->dev = &adapter->pdev->dev; 1096 rx_ring->dev = &adapter->pdev->dev;
1106 rx_ring->count = I40EVF_DEFAULT_RXD; 1097 rx_ring->count = adapter->rx_desc_count;
1107 adapter->rx_rings[i] = rx_ring; 1098 adapter->rx_rings[i] = rx_ring;
1108 } 1099 }
1109 1100
@@ -1141,9 +1132,6 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1141 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; 1132 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1142 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); 1133 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1143 1134
1144 /* A failure in MSI-X entry allocation isn't fatal, but it does
1145 * mean we disable MSI-X capabilities of the adapter.
1146 */
1147 adapter->msix_entries = kcalloc(v_budget, 1135 adapter->msix_entries = kcalloc(v_budget,
1148 sizeof(struct msix_entry), GFP_KERNEL); 1136 sizeof(struct msix_entry), GFP_KERNEL);
1149 if (!adapter->msix_entries) { 1137 if (!adapter->msix_entries) {
@@ -1183,7 +1171,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1183 q_vector->vsi = &adapter->vsi; 1171 q_vector->vsi = &adapter->vsi;
1184 q_vector->v_idx = q_idx; 1172 q_vector->v_idx = q_idx;
1185 netif_napi_add(adapter->netdev, &q_vector->napi, 1173 netif_napi_add(adapter->netdev, &q_vector->napi,
1186 i40evf_napi_poll, 64); 1174 i40evf_napi_poll, NAPI_POLL_WEIGHT);
1187 adapter->q_vector[q_idx] = q_vector; 1175 adapter->q_vector[q_idx] = q_vector;
1188 } 1176 }
1189 1177
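NAPI_POLL_WEIGHT is the stock per-poll budget from <linux/netdevice.h>, so this change only removes the magic number; behavior is identical:

	/* from <linux/netdevice.h>: default of 64 packets per napi poll */
	#define NAPI_POLL_WEIGHT 64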
@@ -1236,8 +1224,6 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
1236 pci_disable_msix(adapter->pdev); 1224 pci_disable_msix(adapter->pdev);
1237 kfree(adapter->msix_entries); 1225 kfree(adapter->msix_entries);
1238 adapter->msix_entries = NULL; 1226 adapter->msix_entries = NULL;
1239
1240 return;
1241} 1227}
1242 1228
1243/** 1229/**
@@ -1309,7 +1295,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
1309 goto restart_watchdog; 1295 goto restart_watchdog;
1310 1296
1311 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { 1297 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1312 dev_info(&adapter->pdev->dev, "Checking for redemption\n");
1313 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) { 1298 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
1314 /* A chance for redemption! */ 1299 /* A chance for redemption! */
1315 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); 1300 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1340,8 +1325,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
1340 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) { 1325 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
1341 adapter->state = __I40EVF_RESETTING; 1326 adapter->state = __I40EVF_RESETTING;
1342 adapter->flags |= I40EVF_FLAG_RESET_PENDING; 1327 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1343 dev_err(&adapter->pdev->dev, "Hardware reset detected.\n"); 1328 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
1344 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1345 schedule_work(&adapter->reset_task); 1329 schedule_work(&adapter->reset_task);
1346 adapter->aq_pending = 0; 1330 adapter->aq_pending = 0;
1347 adapter->aq_required = 0; 1331 adapter->aq_required = 0;
@@ -1413,7 +1397,7 @@ restart_watchdog:
1413} 1397}
1414 1398
1415/** 1399/**
1416 * i40evf_configure_rss - increment to next available tx queue 1400 * next_queue - increment to next available tx queue
1417 * @adapter: board private structure 1401 * @adapter: board private structure
1418 * @j: queue counter 1402 * @j: queue counter
1419 * 1403 *
@@ -1504,15 +1488,12 @@ static void i40evf_reset_task(struct work_struct *work)
1504 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { 1488 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1505 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1489 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1506 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1490 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1507 if (rstat_val != I40E_VFR_VFACTIVE) { 1491 if (rstat_val != I40E_VFR_VFACTIVE)
1508 dev_info(&adapter->pdev->dev, "Reset now occurring\n");
1509 break; 1492 break;
1510 } else { 1493 else
1511 msleep(I40EVF_RESET_WAIT_MS); 1494 msleep(I40EVF_RESET_WAIT_MS);
1512 }
1513 } 1495 }
1514 if (i == I40EVF_RESET_WAIT_COUNT) { 1496 if (i == I40EVF_RESET_WAIT_COUNT) {
1515 dev_err(&adapter->pdev->dev, "Reset was not detected\n");
1516 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1497 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1517 goto continue_reset; /* act like the reset happened */ 1498 goto continue_reset; /* act like the reset happened */
1518 } 1499 }
@@ -1521,22 +1502,24 @@ static void i40evf_reset_task(struct work_struct *work)
1521 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { 1502 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1522 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1503 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1523 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1504 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1524 if (rstat_val == I40E_VFR_VFACTIVE) { 1505 if (rstat_val == I40E_VFR_VFACTIVE)
1525 dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
1526 break; 1506 break;
1527 } else { 1507 else
1528 msleep(I40EVF_RESET_WAIT_MS); 1508 msleep(I40EVF_RESET_WAIT_MS);
1529 }
1530 } 1509 }
1531 if (i == I40EVF_RESET_WAIT_COUNT) { 1510 if (i == I40EVF_RESET_WAIT_COUNT) {
1532 /* reset never finished */ 1511 /* reset never finished */
1533 dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n", 1512 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
1534 rstat_val); 1513 rstat_val);
1535 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; 1514 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
1536 1515
1537 if (netif_running(adapter->netdev)) 1516 if (netif_running(adapter->netdev)) {
1538 i40evf_close(adapter->netdev); 1517 set_bit(__I40E_DOWN, &adapter->vsi.state);
1539 1518 i40evf_down(adapter);
1519 i40evf_free_traffic_irqs(adapter);
1520 i40evf_free_all_tx_resources(adapter);
1521 i40evf_free_all_rx_resources(adapter);
1522 }
1540 i40evf_free_misc_irq(adapter); 1523 i40evf_free_misc_irq(adapter);
1541 i40evf_reset_interrupt_capability(adapter); 1524 i40evf_reset_interrupt_capability(adapter);
1542 i40evf_free_queues(adapter); 1525 i40evf_free_queues(adapter);
@@ -1591,7 +1574,7 @@ continue_reset:
1591 } 1574 }
1592 return; 1575 return;
1593reset_err: 1576reset_err:
1594 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n"); 1577 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
1595 i40evf_close(adapter->netdev); 1578 i40evf_close(adapter->netdev);
1596} 1579}
1597 1580
@@ -1607,6 +1590,7 @@ static void i40evf_adminq_task(struct work_struct *work)
1607 struct i40e_arq_event_info event; 1590 struct i40e_arq_event_info event;
1608 struct i40e_virtchnl_msg *v_msg; 1591 struct i40e_virtchnl_msg *v_msg;
1609 i40e_status ret; 1592 i40e_status ret;
1593 u32 val, oldval;
1610 u16 pending; 1594 u16 pending;
1611 1595
1612 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) 1596 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
@@ -1614,11 +1598,9 @@ static void i40evf_adminq_task(struct work_struct *work)
1614 1598
1615 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; 1599 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
1616 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 1600 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
1617 if (!event.msg_buf) { 1601 if (!event.msg_buf)
1618 dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
1619 __func__);
1620 return; 1602 return;
1621 } 1603
1622 v_msg = (struct i40e_virtchnl_msg *)&event.desc; 1604 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
1623 do { 1605 do {
1624 ret = i40evf_clean_arq_element(hw, &event, &pending); 1606 ret = i40evf_clean_arq_element(hw, &event, &pending);
@@ -1636,6 +1618,41 @@ static void i40evf_adminq_task(struct work_struct *work)
1636 } 1618 }
1637 } while (pending); 1619 } while (pending);
1638 1620
1621 /* check for error indications */
1622 val = rd32(hw, hw->aq.arq.len);
1623 oldval = val;
1624 if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
1625 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
1626 val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
1627 }
1628 if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
1629 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
1630 val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
1631 }
1632 if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
1633 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
1634 val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
1635 }
1636 if (oldval != val)
1637 wr32(hw, hw->aq.arq.len, val);
1638
1639 val = rd32(hw, hw->aq.asq.len);
1640 oldval = val;
1641 if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
1642 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
1643 val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
1644 }
1645 if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
1646 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
1647 val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
1648 }
1649 if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
1650 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
1651 val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
1652 }
1653 if (oldval != val)
1654 wr32(hw, hw->aq.asq.len, val);
1655
1639 /* re-enable Admin queue interrupt cause */ 1656 /* re-enable Admin queue interrupt cause */
1640 i40evf_misc_irq_enable(adapter); 1657 i40evf_misc_irq_enable(adapter);
1641 1658
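The error-indication block added above follows a common read-log-clear pattern for latched status bits: read the queue length register once, log and mask off each error bit that is set, then write back only if something actually changed, which avoids a needless MMIO write on the clean path. In outline (reg and ERR_MASK are placeholders, not names from this patch):

	val = rd32(hw, reg);
	oldval = val;
	if (val & ERR_MASK) {
		dev_info(&adapter->pdev->dev, "error detected\n");
		val &= ~ERR_MASK;	/* clear the latched bit */
	}
	if (oldval != val)
		wr32(hw, reg, val);	/* write back only when needed */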
@@ -1673,6 +1690,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
1673 int i, err = 0; 1690 int i, err = 0;
1674 1691
1675 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { 1692 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
1693 adapter->tx_rings[i]->count = adapter->tx_desc_count;
1676 err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); 1694 err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
1677 if (!err) 1695 if (!err)
1678 continue; 1696 continue;
@@ -1700,6 +1718,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
1700 int i, err = 0; 1718 int i, err = 0;
1701 1719
1702 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { 1720 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
1721 adapter->rx_rings[i]->count = adapter->rx_desc_count;
1703 err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); 1722 err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
1704 if (!err) 1723 if (!err)
1705 continue; 1724 continue;
@@ -1804,12 +1823,11 @@ static int i40evf_close(struct net_device *netdev)
1804 if (adapter->state <= __I40EVF_DOWN) 1823 if (adapter->state <= __I40EVF_DOWN)
1805 return 0; 1824 return 0;
1806 1825
1807 /* signal that we are down to the interrupt handler */
1808 adapter->state = __I40EVF_DOWN;
1809 1826
1810 set_bit(__I40E_DOWN, &adapter->vsi.state); 1827 set_bit(__I40E_DOWN, &adapter->vsi.state);
1811 1828
1812 i40evf_down(adapter); 1829 i40evf_down(adapter);
1830 adapter->state = __I40EVF_DOWN;
1813 i40evf_free_traffic_irqs(adapter); 1831 i40evf_free_traffic_irqs(adapter);
1814 1832
1815 i40evf_free_all_tx_resources(adapter); 1833 i40evf_free_all_tx_resources(adapter);
@@ -1848,8 +1866,6 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
1848 1866
1849 WARN_ON(in_interrupt()); 1867 WARN_ON(in_interrupt());
1850 1868
1851 adapter->state = __I40EVF_RESETTING;
1852
1853 i40evf_down(adapter); 1869 i40evf_down(adapter);
1854 1870
1855 /* allocate transmit descriptors */ 1871 /* allocate transmit descriptors */
@@ -1872,7 +1888,7 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
1872 return; 1888 return;
1873 1889
1874err_reinit: 1890err_reinit:
1875 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n"); 1891 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
1876 i40evf_close(netdev); 1892 i40evf_close(netdev);
1877} 1893}
1878 1894
@@ -1967,7 +1983,7 @@ static void i40evf_init_task(struct work_struct *work)
1967 } 1983 }
1968 err = i40evf_check_reset_complete(hw); 1984 err = i40evf_check_reset_complete(hw);
1969 if (err) { 1985 if (err) {
1970 dev_err(&pdev->dev, "Device is still in reset (%d)\n", 1986 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
1971 err); 1987 err);
1972 goto err; 1988 goto err;
1973 } 1989 }
@@ -1993,14 +2009,14 @@ static void i40evf_init_task(struct work_struct *work)
1993 break; 2009 break;
1994 case __I40EVF_INIT_VERSION_CHECK: 2010 case __I40EVF_INIT_VERSION_CHECK:
1995 if (!i40evf_asq_done(hw)) { 2011 if (!i40evf_asq_done(hw)) {
1996 dev_err(&pdev->dev, "Admin queue command never completed.\n"); 2012 dev_err(&pdev->dev, "Admin queue command never completed\n");
1997 goto err; 2013 goto err;
1998 } 2014 }
1999 2015
2000 /* aq msg sent, awaiting reply */ 2016 /* aq msg sent, awaiting reply */
2001 err = i40evf_verify_api_ver(adapter); 2017 err = i40evf_verify_api_ver(adapter);
2002 if (err) { 2018 if (err) {
2003 dev_err(&pdev->dev, "Unable to verify API version (%d)\n", 2019 dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
2004 err); 2020 err);
2005 goto err; 2021 goto err;
2006 } 2022 }
@@ -2074,12 +2090,12 @@ static void i40evf_init_task(struct work_struct *work)
2074 netdev->hw_features &= ~NETIF_F_RXCSUM; 2090 netdev->hw_features &= ~NETIF_F_RXCSUM;
2075 2091
2076 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 2092 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2077 dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n", 2093 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2078 adapter->hw.mac.addr); 2094 adapter->hw.mac.addr);
2079 random_ether_addr(adapter->hw.mac.addr); 2095 random_ether_addr(adapter->hw.mac.addr);
2080 } 2096 }
2081 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 2097 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2082 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 2098 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2083 2099
2084 INIT_LIST_HEAD(&adapter->mac_filter_list); 2100 INIT_LIST_HEAD(&adapter->mac_filter_list);
2085 INIT_LIST_HEAD(&adapter->vlan_filter_list); 2101 INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2087,7 +2103,7 @@ static void i40evf_init_task(struct work_struct *work)
2087 if (NULL == f) 2103 if (NULL == f)
2088 goto err_sw_init; 2104 goto err_sw_init;
2089 2105
2090 memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN); 2106 ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
2091 f->add = true; 2107 f->add = true;
2092 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; 2108 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
2093 2109
@@ -2098,6 +2114,8 @@ static void i40evf_init_task(struct work_struct *work)
2098 adapter->watchdog_timer.data = (unsigned long)adapter; 2114 adapter->watchdog_timer.data = (unsigned long)adapter;
2099 mod_timer(&adapter->watchdog_timer, jiffies + 1); 2115 mod_timer(&adapter->watchdog_timer, jiffies + 1);
2100 2116
2117 adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
2118 adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
2101 err = i40evf_init_interrupt_scheme(adapter); 2119 err = i40evf_init_interrupt_scheme(adapter);
2102 if (err) 2120 if (err)
2103 goto err_sw_init; 2121 goto err_sw_init;
@@ -2114,8 +2132,10 @@ static void i40evf_init_task(struct work_struct *work)
2114 adapter->vsi.back = adapter; 2132 adapter->vsi.back = adapter;
2115 adapter->vsi.base_vector = 1; 2133 adapter->vsi.base_vector = 1;
2116 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; 2134 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
2117 adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC; 2135 adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
2118 adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC; 2136 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
2137 adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
2138 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
2119 adapter->vsi.netdev = adapter->netdev; 2139 adapter->vsi.netdev = adapter->netdev;
2120 2140
2121 if (!adapter->netdev_registered) { 2141 if (!adapter->netdev_registered) {
@@ -2128,7 +2148,7 @@ static void i40evf_init_task(struct work_struct *work)
2128 2148
2129 netif_tx_stop_all_queues(netdev); 2149 netif_tx_stop_all_queues(netdev);
2130 2150
2131 dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr); 2151 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2132 if (netdev->features & NETIF_F_GRO) 2152 if (netdev->features & NETIF_F_GRO)
2133 dev_info(&pdev->dev, "GRO is enabled\n"); 2153 dev_info(&pdev->dev, "GRO is enabled\n");
2134 2154
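The %pMAC -> %pM changes in this file are real fixes, not cosmetics: the kernel's vsprintf extension for MAC addresses is %pM, so "%pMAC" was parsed as %pM followed by a literal "AC" appended after every printed address. Correct usage:

	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	/* prints e.g. "MAC address: 00:1b:21:ab:cd:ef" */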
@@ -2152,12 +2172,11 @@ err_alloc:
2152err: 2172err:
2153 /* Things went into the weeds, so try again later */ 2173 /* Things went into the weeds, so try again later */
2154 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { 2174 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
2155 dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n"); 2175 dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
2156 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; 2176 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
2157 return; /* do not reschedule */ 2177 return; /* do not reschedule */
2158 } 2178 }
2159 schedule_delayed_work(&adapter->init_task, HZ * 3); 2179 schedule_delayed_work(&adapter->init_task, HZ * 3);
2160 return;
2161} 2180}
2162 2181
2163/** 2182/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e294f012647d..2dc0bac76717 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -12,6 +12,9 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
15 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING". 19 * the file called "COPYING".
17 * 20 *
@@ -216,11 +219,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
216 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + 219 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
217 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); 220 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
218 vqci = kzalloc(len, GFP_ATOMIC); 221 vqci = kzalloc(len, GFP_ATOMIC);
219 if (!vqci) { 222 if (!vqci)
220 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
221 __func__);
222 return; 223 return;
223 } 224
224 vqci->vsi_id = adapter->vsi_res->vsi_id; 225 vqci->vsi_id = adapter->vsi_res->vsi_id;
225 vqci->num_queue_pairs = pairs; 226 vqci->num_queue_pairs = pairs;
226 vqpi = vqci->qpair; 227 vqpi = vqci->qpair;
@@ -232,6 +233,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
232 vqpi->txq.queue_id = i; 233 vqpi->txq.queue_id = i;
233 vqpi->txq.ring_len = adapter->tx_rings[i]->count; 234 vqpi->txq.ring_len = adapter->tx_rings[i]->count;
234 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; 235 vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
236 vqpi->txq.headwb_enabled = 1;
237 vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
238 (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
235 239
236 vqpi->rxq.vsi_id = vqci->vsi_id; 240 vqpi->rxq.vsi_id = vqci->vsi_id;
237 vqpi->rxq.queue_id = i; 241 vqpi->rxq.queue_id = i;
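With head write-back enabled, the hardware DMAs the consumed-descriptor head out to memory instead of the driver polling a register. The write-back slot is carved out of the same DMA region as the ring, immediately after the last descriptor, which is what the address arithmetic above expresses. For example, assuming 16-byte i40e TX descriptors and a hypothetical 512-entry ring:

	/* illustrative numbers only */
	dma_addr_t ring_base = vqpi->txq.dma_ring_addr;
	u32 ring_len = 512;	/* descriptors */

	vqpi->txq.dma_headwb_addr =
		ring_base + ring_len * sizeof(struct i40e_tx_desc);
	/* 512 * 16 bytes = 8 KiB past the ring base */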
@@ -329,11 +333,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
329 (adapter->num_msix_vectors * 333 (adapter->num_msix_vectors *
330 sizeof(struct i40e_virtchnl_vector_map)); 334 sizeof(struct i40e_virtchnl_vector_map));
331 vimi = kzalloc(len, GFP_ATOMIC); 335 vimi = kzalloc(len, GFP_ATOMIC);
332 if (!vimi) { 336 if (!vimi)
333 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
334 __func__);
335 return; 337 return;
336 }
337 338
338 vimi->num_vectors = adapter->num_msix_vectors; 339 vimi->num_vectors = adapter->num_msix_vectors;
339 /* Queue vectors first */ 340 /* Queue vectors first */
@@ -390,7 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
390 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 391 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
391 (count * sizeof(struct i40e_virtchnl_ether_addr)); 392 (count * sizeof(struct i40e_virtchnl_ether_addr));
392 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 393 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
393 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", 394 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
394 __func__); 395 __func__);
395 count = (I40EVF_MAX_AQ_BUF_SIZE - 396 count = (I40EVF_MAX_AQ_BUF_SIZE -
396 sizeof(struct i40e_virtchnl_ether_addr_list)) / 397 sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -399,16 +400,14 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
399 } 400 }
400 401
401 veal = kzalloc(len, GFP_ATOMIC); 402 veal = kzalloc(len, GFP_ATOMIC);
402 if (!veal) { 403 if (!veal)
403 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
404 __func__);
405 return; 404 return;
406 } 405
407 veal->vsi_id = adapter->vsi_res->vsi_id; 406 veal->vsi_id = adapter->vsi_res->vsi_id;
408 veal->num_elements = count; 407 veal->num_elements = count;
409 list_for_each_entry(f, &adapter->mac_filter_list, list) { 408 list_for_each_entry(f, &adapter->mac_filter_list, list) {
410 if (f->add) { 409 if (f->add) {
411 memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); 410 ether_addr_copy(veal->list[i].addr, f->macaddr);
412 i++; 411 i++;
413 f->add = false; 412 f->add = false;
414 } 413 }
@@ -454,7 +453,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
454 len = sizeof(struct i40e_virtchnl_ether_addr_list) + 453 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
455 (count * sizeof(struct i40e_virtchnl_ether_addr)); 454 (count * sizeof(struct i40e_virtchnl_ether_addr));
456 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 455 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
457 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", 456 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
458 __func__); 457 __func__);
459 count = (I40EVF_MAX_AQ_BUF_SIZE - 458 count = (I40EVF_MAX_AQ_BUF_SIZE -
460 sizeof(struct i40e_virtchnl_ether_addr_list)) / 459 sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -462,16 +461,14 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
462 len = I40EVF_MAX_AQ_BUF_SIZE; 461 len = I40EVF_MAX_AQ_BUF_SIZE;
463 } 462 }
464 veal = kzalloc(len, GFP_ATOMIC); 463 veal = kzalloc(len, GFP_ATOMIC);
465 if (!veal) { 464 if (!veal)
466 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
467 __func__);
468 return; 465 return;
469 } 466
470 veal->vsi_id = adapter->vsi_res->vsi_id; 467 veal->vsi_id = adapter->vsi_res->vsi_id;
471 veal->num_elements = count; 468 veal->num_elements = count;
472 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 469 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
473 if (f->remove) { 470 if (f->remove) {
474 memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); 471 ether_addr_copy(veal->list[i].addr, f->macaddr);
475 i++; 472 i++;
476 list_del(&f->list); 473 list_del(&f->list);
477 kfree(f); 474 kfree(f);
@@ -518,7 +515,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
518 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 515 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
519 (count * sizeof(u16)); 516 (count * sizeof(u16));
520 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 517 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
521 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", 518 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
522 __func__); 519 __func__);
523 count = (I40EVF_MAX_AQ_BUF_SIZE - 520 count = (I40EVF_MAX_AQ_BUF_SIZE -
524 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 521 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -526,11 +523,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
526 len = I40EVF_MAX_AQ_BUF_SIZE; 523 len = I40EVF_MAX_AQ_BUF_SIZE;
527 } 524 }
528 vvfl = kzalloc(len, GFP_ATOMIC); 525 vvfl = kzalloc(len, GFP_ATOMIC);
529 if (!vvfl) { 526 if (!vvfl)
530 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
531 __func__);
532 return; 527 return;
533 } 528
534 vvfl->vsi_id = adapter->vsi_res->vsi_id; 529 vvfl->vsi_id = adapter->vsi_res->vsi_id;
535 vvfl->num_elements = count; 530 vvfl->num_elements = count;
536 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 531 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
@@ -580,7 +575,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
580 len = sizeof(struct i40e_virtchnl_vlan_filter_list) + 575 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
581 (count * sizeof(u16)); 576 (count * sizeof(u16));
582 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 577 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
583 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", 578 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
584 __func__); 579 __func__);
585 count = (I40EVF_MAX_AQ_BUF_SIZE - 580 count = (I40EVF_MAX_AQ_BUF_SIZE -
586 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 581 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -588,11 +583,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
588 len = I40EVF_MAX_AQ_BUF_SIZE; 583 len = I40EVF_MAX_AQ_BUF_SIZE;
589 } 584 }
590 vvfl = kzalloc(len, GFP_ATOMIC); 585 vvfl = kzalloc(len, GFP_ATOMIC);
591 if (!vvfl) { 586 if (!vvfl)
592 dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
593 __func__);
594 return; 587 return;
595 } 588
596 vvfl->vsi_id = adapter->vsi_res->vsi_id; 589 vvfl->vsi_id = adapter->vsi_res->vsi_id;
597 vvfl->num_elements = count; 590 vvfl->num_elements = count;
598 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 591 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
@@ -721,7 +714,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
721 return; 714 return;
722 } 715 }
723 if (v_opcode != adapter->current_op) { 716 if (v_opcode != adapter->current_op) {
724 dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n", 717 dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
725 __func__, adapter->current_op, v_opcode); 718 __func__, adapter->current_op, v_opcode);
726 /* We're probably completely screwed at this point, but clear 719 /* We're probably completely screwed at this point, but clear
727 * the current op and try to carry on.... 720 * the current op and try to carry on....
@@ -730,7 +723,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
730 return; 723 return;
731 } 724 }
732 if (v_retval) { 725 if (v_retval) {
733 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n", 726 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
734 __func__, v_retval, v_opcode); 727 __func__, v_retval, v_opcode);
735 } 728 }
736 switch (v_opcode) { 729 switch (v_opcode) {
@@ -745,9 +738,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
745 stats->tx_broadcast; 738 stats->tx_broadcast;
746 adapter->net_stats.rx_bytes = stats->rx_bytes; 739 adapter->net_stats.rx_bytes = stats->rx_bytes;
747 adapter->net_stats.tx_bytes = stats->tx_bytes; 740 adapter->net_stats.tx_bytes = stats->tx_bytes;
748 adapter->net_stats.rx_errors = stats->rx_errors;
749 adapter->net_stats.tx_errors = stats->tx_errors; 741 adapter->net_stats.tx_errors = stats->tx_errors;
750 adapter->net_stats.rx_dropped = stats->rx_missed; 742 adapter->net_stats.rx_dropped = stats->rx_discards;
751 adapter->net_stats.tx_dropped = stats->tx_discards; 743 adapter->net_stats.tx_dropped = stats->tx_discards;
752 adapter->current_stats = *stats; 744 adapter->current_stats = *stats;
753 } 745 }
@@ -781,7 +773,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
781 adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); 773 adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
782 break; 774 break;
783 default: 775 default:
784 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n", 776 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
785 __func__, v_opcode); 777 __func__, v_opcode);
786 break; 778 break;
787 } /* switch v_opcode */ 779 } /* switch v_opcode */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12e775..a2db388cc31e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27/* e1000_82575 24/* e1000_82575
28 * e1000_82576 25 * e1000_82576
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
73static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); 70static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
74static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); 71static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
75static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); 72static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
76static const u16 e1000_82580_rxpbs_table[] = 73static const u16 e1000_82580_rxpbs_table[] = {
77 { 36, 72, 144, 1, 2, 4, 8, 16, 74 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
78 35, 70, 140 };
79 75
80/** 76/**
81 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 77 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -159,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
159 ret_val = igb_check_for_link_82575(hw); 155 ret_val = igb_check_for_link_82575(hw);
160 } 156 }
161 157
162 return E1000_SUCCESS; 158 return 0;
163} 159}
164 160
165/** 161/**
@@ -526,7 +522,7 @@ out:
526static s32 igb_get_invariants_82575(struct e1000_hw *hw) 522static s32 igb_get_invariants_82575(struct e1000_hw *hw)
527{ 523{
528 struct e1000_mac_info *mac = &hw->mac; 524 struct e1000_mac_info *mac = &hw->mac;
529 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; 525 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
530 s32 ret_val; 526 s32 ret_val;
531 u32 ctrl_ext = 0; 527 u32 ctrl_ext = 0;
532 u32 link_mode = 0; 528 u32 link_mode = 0;
@@ -1008,7 +1004,6 @@ out:
1008static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) 1004static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1009{ 1005{
1010 struct e1000_phy_info *phy = &hw->phy; 1006 struct e1000_phy_info *phy = &hw->phy;
1011 s32 ret_val = 0;
1012 u16 data; 1007 u16 data;
1013 1008
1014 data = rd32(E1000_82580_PHY_POWER_MGMT); 1009 data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1032,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1032 data &= ~E1000_82580_PM_SPD; } 1027 data &= ~E1000_82580_PM_SPD; }
1033 1028
1034 wr32(E1000_82580_PHY_POWER_MGMT, data); 1029 wr32(E1000_82580_PHY_POWER_MGMT, data);
1035 return ret_val; 1030 return 0;
1036} 1031}
1037 1032
1038/** 1033/**
@@ -1052,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1052static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) 1047static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1053{ 1048{
1054 struct e1000_phy_info *phy = &hw->phy; 1049 struct e1000_phy_info *phy = &hw->phy;
1055 s32 ret_val = 0;
1056 u16 data; 1050 u16 data;
1057 1051
1058 data = rd32(E1000_82580_PHY_POWER_MGMT); 1052 data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1077,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1077 } 1071 }
1078 1072
1079 wr32(E1000_82580_PHY_POWER_MGMT, data); 1073 wr32(E1000_82580_PHY_POWER_MGMT, data);
1080 return ret_val; 1074 return 0;
1081} 1075}
1082 1076
1083/** 1077/**
@@ -1180,8 +1174,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1180{ 1174{
1181 u32 swfw_sync; 1175 u32 swfw_sync;
1182 1176
1183 while (igb_get_hw_semaphore(hw) != 0); 1177 while (igb_get_hw_semaphore(hw) != 0)
1184 /* Empty */ 1178 ; /* Empty */
1185 1179
1186 swfw_sync = rd32(E1000_SW_FW_SYNC); 1180 swfw_sync = rd32(E1000_SW_FW_SYNC);
1187 swfw_sync &= ~mask; 1181 swfw_sync &= ~mask;
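Moving the semicolon onto its own line makes the intentionally empty loop body explicit; a trailing `;` on the while line reads like an accidentally terminated loop and is a classic source of bugs:

	while (igb_get_hw_semaphore(hw) != 0)
		;	/* spin until the HW semaphore is acquired */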
@@ -1203,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1203static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) 1197static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1204{ 1198{
1205 s32 timeout = PHY_CFG_TIMEOUT; 1199 s32 timeout = PHY_CFG_TIMEOUT;
1206 s32 ret_val = 0;
1207 u32 mask = E1000_NVM_CFG_DONE_PORT_0; 1200 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1208 1201
1209 if (hw->bus.func == 1) 1202 if (hw->bus.func == 1)
@@ -1216,7 +1209,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1216 while (timeout) { 1209 while (timeout) {
1217 if (rd32(E1000_EEMNGCTL) & mask) 1210 if (rd32(E1000_EEMNGCTL) & mask)
1218 break; 1211 break;
1219 msleep(1); 1212 usleep_range(1000, 2000);
1220 timeout--; 1213 timeout--;
1221 } 1214 }
1222 if (!timeout) 1215 if (!timeout)
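The msleep() -> usleep_range() conversions in this file follow Documentation/timers/timers-howto.txt: msleep() is jiffy-based, so msleep(1) can easily sleep 10-20 ms on a HZ=100 kernel, whereas usleep_range() uses hrtimers and honors the requested window, with the min/max slack letting the scheduler coalesce wakeups:

	usleep_range(1000, 2000);	/* sleep 1-2 ms; was msleep(1) */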
@@ -1227,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1227 (hw->phy.type == e1000_phy_igp_3)) 1220 (hw->phy.type == e1000_phy_igp_3))
1228 igb_phy_init_script_igp3(hw); 1221 igb_phy_init_script_igp3(hw);
1229 1222
1230 return ret_val; 1223 return 0;
1231} 1224}
1232 1225
1233/** 1226/**
@@ -1269,7 +1262,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1269 1262
1270 if (hw->phy.media_type != e1000_media_type_copper) { 1263 if (hw->phy.media_type != e1000_media_type_copper) {
1271 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 1264 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1272 &duplex); 1265 &duplex);
1273 /* Use this flag to determine if link needs to be checked or 1266 /* Use this flag to determine if link needs to be checked or
1274 * not. If we have link clear the flag so that we do not 1267 * not. If we have link clear the flag so that we do not
1275 * continue to check for link. 1268 * continue to check for link.
@@ -1316,7 +1309,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1316 1309
1317 /* flush the write to verify completion */ 1310 /* flush the write to verify completion */
1318 wrfl(); 1311 wrfl();
1319 msleep(1); 1312 usleep_range(1000, 2000);
1320} 1313}
1321 1314
1322/** 1315/**
@@ -1411,7 +1404,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1411 1404
1412 /* flush the write to verify completion */ 1405 /* flush the write to verify completion */
1413 wrfl(); 1406 wrfl();
1414 msleep(1); 1407 usleep_range(1000, 2000);
1415 } 1408 }
1416} 1409}
1417 1410
@@ -1436,9 +1429,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1436 1429
1437 /* set the completion timeout for interface */ 1430 /* set the completion timeout for interface */
1438 ret_val = igb_set_pcie_completion_timeout(hw); 1431 ret_val = igb_set_pcie_completion_timeout(hw);
1439 if (ret_val) { 1432 if (ret_val)
1440 hw_dbg("PCI-E Set completion timeout has failed.\n"); 1433 hw_dbg("PCI-E Set completion timeout has failed.\n");
1441 }
1442 1434
1443 hw_dbg("Masking off all interrupts\n"); 1435 hw_dbg("Masking off all interrupts\n");
1444 wr32(E1000_IMC, 0xffffffff); 1436 wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1439,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1447 wr32(E1000_TCTL, E1000_TCTL_PSP); 1439 wr32(E1000_TCTL, E1000_TCTL_PSP);
1448 wrfl(); 1440 wrfl();
1449 1441
1450 msleep(10); 1442 usleep_range(10000, 20000);
1451 1443
1452 ctrl = rd32(E1000_CTRL); 1444 ctrl = rd32(E1000_CTRL);
1453 1445
@@ -1622,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1622{ 1614{
1623 u32 ctrl_ext, ctrl_reg, reg, anadv_reg; 1615 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1624 bool pcs_autoneg; 1616 bool pcs_autoneg;
1625 s32 ret_val = E1000_SUCCESS; 1617 s32 ret_val = 0;
1626 u16 data; 1618 u16 data;
1627 1619
1628 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1620 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
@@ -1676,7 +1668,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1676 hw->mac.type == e1000_82576) { 1668 hw->mac.type == e1000_82576) {
1677 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1669 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1678 if (ret_val) { 1670 if (ret_val) {
1679 printk(KERN_DEBUG "NVM Read Error\n\n"); 1671 hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
1680 return ret_val; 1672 return ret_val;
1681 } 1673 }
1682 1674
@@ -1689,7 +1681,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1689 * link either autoneg or be forced to 1000/Full 1681 * link either autoneg or be forced to 1000/Full
1690 */ 1682 */
1691 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1683 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1692 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1684 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1693 1685
1694 /* set speed of 1000/Full if speed/duplex is forced */ 1686 /* set speed of 1000/Full if speed/duplex is forced */
1695 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1687 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1917,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1925 } 1917 }
1926 /* Poll all queues to verify they have shut down */ 1918 /* Poll all queues to verify they have shut down */
1927 for (ms_wait = 0; ms_wait < 10; ms_wait++) { 1919 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1928 msleep(1); 1920 usleep_range(1000, 2000);
1929 rx_enabled = 0; 1921 rx_enabled = 0;
1930 for (i = 0; i < 4; i++) 1922 for (i = 0; i < 4; i++)
1931 rx_enabled |= rd32(E1000_RXDCTL(i)); 1923 rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1945,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1953 wr32(E1000_RCTL, temp_rctl); 1945 wr32(E1000_RCTL, temp_rctl);
1954 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); 1946 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1955 wrfl(); 1947 wrfl();
1956 msleep(2); 1948 usleep_range(2000, 3000);
1957 1949
1958 /* Enable RX queues that were previously enabled and restore our 1950 /* Enable RX queues that were previously enabled and restore our
1959 * previous state 1951 * previous state
@@ -2005,14 +1997,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
2005 * 16ms to 55ms 1997 * 16ms to 55ms
2006 */ 1998 */
2007 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 1999 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2008 &pcie_devctl2); 2000 &pcie_devctl2);
2009 if (ret_val) 2001 if (ret_val)
2010 goto out; 2002 goto out;
2011 2003
2012 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; 2004 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
2013 2005
2014 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 2006 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2015 &pcie_devctl2); 2007 &pcie_devctl2);
2016out: 2008out:
2017 /* disable completion timeout resend */ 2009 /* disable completion timeout resend */
2018 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; 2010 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2233,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2241 wr32(E1000_TCTL, E1000_TCTL_PSP); 2233 wr32(E1000_TCTL, E1000_TCTL_PSP);
2242 wrfl(); 2234 wrfl();
2243 2235
2244 msleep(10); 2236 usleep_range(10000, 11000);
2245 2237
2246 /* Determine whether or not a global dev reset is requested */ 2238 /* Determine whether or not a global dev reset is requested */
2247 if (global_device_reset && 2239 if (global_device_reset &&
@@ -2259,7 +2251,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2259 2251
2260 /* Add delay to insure DEV_RST has time to complete */ 2252 /* Add delay to insure DEV_RST has time to complete */
2261 if (global_device_reset) 2253 if (global_device_reset)
2262 msleep(5); 2254 usleep_range(5000, 6000);
2263 2255
2264 ret_val = igb_get_auto_rd_done(hw); 2256 ret_val = igb_get_auto_rd_done(hw);
2265 if (ret_val) { 2257 if (ret_val) {
@@ -2436,8 +2428,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2436 2428
2437 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2429 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2438 if (ret_val) { 2430 if (ret_val) {
2439 hw_dbg("NVM Read Error while updating checksum" 2431 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
2440 " compatibility bit.\n");
2441 goto out; 2432 goto out;
2442 } 2433 }
2443 2434
@@ -2447,8 +2438,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2447 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2438 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2448 &nvm_data); 2439 &nvm_data);
2449 if (ret_val) { 2440 if (ret_val) {
2450 hw_dbg("NVM Write Error while updating checksum" 2441 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
2451 " compatibility bit.\n");
2452 goto out; 2442 goto out;
2453 } 2443 }
2454 } 2444 }
@@ -2525,7 +2515,7 @@ out:
2525static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2515static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2526 u16 *data, bool read) 2516 u16 *data, bool read)
2527{ 2517{
2528 s32 ret_val = E1000_SUCCESS; 2518 s32 ret_val = 0;
2529 2519
2530 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2520 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2531 if (ret_val) 2521 if (ret_val)
@@ -2559,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2559 **/ 2549 **/
2560s32 igb_set_eee_i350(struct e1000_hw *hw) 2550s32 igb_set_eee_i350(struct e1000_hw *hw)
2561{ 2551{
2562 s32 ret_val = 0;
2563 u32 ipcnfg, eeer; 2552 u32 ipcnfg, eeer;
2564 2553
2565 if ((hw->mac.type < e1000_i350) || 2554 if ((hw->mac.type < e1000_i350) ||
@@ -2593,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2593 rd32(E1000_EEER); 2582 rd32(E1000_EEER);
2594out: 2583out:
2595 2584
2596 return ret_val; 2585 return 0;
2597} 2586}
2598 2587
2599/** 2588/**
@@ -2720,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = {
2720 **/ 2709 **/
2721static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2710static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2722{ 2711{
2723 s32 status = E1000_SUCCESS;
2724 u16 ets_offset; 2712 u16 ets_offset;
2725 u16 ets_cfg; 2713 u16 ets_cfg;
2726 u16 ets_sensor; 2714 u16 ets_sensor;
@@ -2738,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2738 /* Return the internal sensor only if ETS is unsupported */ 2726 /* Return the internal sensor only if ETS is unsupported */
2739 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2727 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2740 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2728 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2741 return status; 2729 return 0;
2742 2730
2743 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2731 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2744 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2732 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2762,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2762 E1000_I2C_THERMAL_SENSOR_ADDR, 2750 E1000_I2C_THERMAL_SENSOR_ADDR,
2763 &data->sensor[i].temp); 2751 &data->sensor[i].temp);
2764 } 2752 }
2765 return status; 2753 return 0;
2766} 2754}
2767 2755
2768/** 2756/**
@@ -2774,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2774 **/ 2762 **/
2775static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2763static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2776{ 2764{
2777 s32 status = E1000_SUCCESS;
2778 u16 ets_offset; 2765 u16 ets_offset;
2779 u16 ets_cfg; 2766 u16 ets_cfg;
2780 u16 ets_sensor; 2767 u16 ets_sensor;
@@ -2800,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2800 /* Return the internal sensor only if ETS is unsupported */ 2787 /* Return the internal sensor only if ETS is unsupported */
2801 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2788 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2802 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2789 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2803 return status; 2790 return 0;
2804 2791
2805 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2792 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2806 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2793 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2831,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2831 low_thresh_delta; 2818 low_thresh_delta;
2832 } 2819 }
2833 } 2820 }
2834 return status; 2821 return 0;
2835} 2822}
2836 2823
2837#endif 2824#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be72416..b407c55738fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
1/******************************************************************************* 1/* Intel(R) Gigabit Ethernet Linux driver
2 2 * Copyright(c) 2007-2014 Intel Corporation.
3 Intel(R) Gigabit Ethernet Linux driver 3 *
4 Copyright(c) 2007-2014 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * You should have received a copy of the GNU General Public License along with
14 14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, see <http://www.gnu.org/licenses/>. 16 * The full GNU General Public License is included in this distribution in
17 17 * the file called "COPYING".
18 The full GNU General Public License is included in this distribution in 18 *
19 the file called "COPYING". 19 * Contact Information:
20 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 Contact Information: 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 22 */
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 23
27#ifndef _E1000_82575_H_ 24#ifndef _E1000_82575_H_
28#define _E1000_82575_H_ 25#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
37 u8 data); 34 u8 data);
38 35
39#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ 36#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
40 (ID_LED_DEF1_DEF2 << 8) | \ 37 (ID_LED_DEF1_DEF2 << 8) | \
41 (ID_LED_DEF1_DEF2 << 4) | \ 38 (ID_LED_DEF1_DEF2 << 4) | \
42 (ID_LED_OFF1_ON2)) 39 (ID_LED_OFF1_ON2))
43 40
44#define E1000_RAR_ENTRIES_82575 16 41#define E1000_RAR_ENTRIES_82575 16
45#define E1000_RAR_ENTRIES_82576 24 42#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
67#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 64#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
68 65
69#define E1000_EICR_TX_QUEUE ( \ 66#define E1000_EICR_TX_QUEUE ( \
70 E1000_EICR_TX_QUEUE0 | \ 67 E1000_EICR_TX_QUEUE0 | \
71 E1000_EICR_TX_QUEUE1 | \ 68 E1000_EICR_TX_QUEUE1 | \
72 E1000_EICR_TX_QUEUE2 | \ 69 E1000_EICR_TX_QUEUE2 | \
73 E1000_EICR_TX_QUEUE3) 70 E1000_EICR_TX_QUEUE3)
74 71
75#define E1000_EICR_RX_QUEUE ( \ 72#define E1000_EICR_RX_QUEUE ( \
76 E1000_EICR_RX_QUEUE0 | \ 73 E1000_EICR_RX_QUEUE0 | \
77 E1000_EICR_RX_QUEUE1 | \ 74 E1000_EICR_RX_QUEUE1 | \
78 E1000_EICR_RX_QUEUE2 | \ 75 E1000_EICR_RX_QUEUE2 | \
79 E1000_EICR_RX_QUEUE3) 76 E1000_EICR_RX_QUEUE3)
80 77
81/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 78/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
82#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ 79#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
92 struct { 89 struct {
93 struct { 90 struct {
94 __le16 pkt_info; /* RSS type, Packet type */ 91 __le16 pkt_info; /* RSS type, Packet type */
95 __le16 hdr_info; /* Split Header, 92 __le16 hdr_info; /* Split Head, buf len */
96 * header buffer length */
97 } lo_dword; 93 } lo_dword;
98 union { 94 union {
99 __le32 rss; /* RSS Hash */ 95 __le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf925ac72..2a8bb35c2df2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_DEFINES_H_
 #define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
 
 /* Same mask, but for extended and packet split descriptors */
 #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
	E1000_RXDEXT_STATERR_CE | \
	E1000_RXDEXT_STATERR_SE | \
	E1000_RXDEXT_STATERR_SEQ | \
	E1000_RXDEXT_STATERR_CXE | \
	E1000_RXDEXT_STATERR_RXE)
 
 #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
 #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,39 +304,34 @@
 #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
 
 /* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
-					    * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
-					     * Threshold */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
 #define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
-					     * transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
 #define E1000_DMACR_DMAC_LX_SHIFT 28
 #define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
 /* DMA Coalescing BMC-to-OS Watchdog Enable */
 #define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
 
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
-					       * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
 
 #define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
 
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
-					      * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
-					 * current window */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
 
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
-					     * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
 
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
-					      * High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
 #define E1000_FCRTC_RTH_COAL_SHIFT 4
 #define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
 
 /* Timestamp in Rx buffer */
 #define E1000_RXPBS_CFG_TS_EN 0x80000000
 
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
 /* SerDes Control */
 #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
 
@@ -406,12 +398,12 @@
  *   o LSC = Link Status Change
  */
 #define IMS_ENABLE_MASK ( \
	E1000_IMS_RXT0 | \
	E1000_IMS_TXDW | \
	E1000_IMS_RXDMT0 | \
	E1000_IMS_RXSEQ | \
	E1000_IMS_LSC | \
	E1000_IMS_DOUTSYNC)
 
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -467,7 +459,6 @@
 #define E1000_RAH_POOL_1 0x00040000
 
 /* Error Codes */
-#define E1000_SUCCESS 0
 #define E1000_ERR_NVM 1
 #define E1000_ERR_PHY 2
 #define E1000_ERR_CONFIG 3
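The deleted E1000_SUCCESS was literally 0, so every comparison against it collapses to a plain kernel-style truth test; this one-line removal drives most of the churn in the .c hunks below. A minimal before/after sketch of a caller (illustrative, not a hunk from this series):

	/* before: compare against the driver-local macro */
	if (igb_read_nvm_eerd(hw, offset, 1, &data) != E1000_SUCCESS)
		goto out;

	/* after: 0 is success, any non-zero value is an error code */
	if (igb_read_nvm_eerd(hw, offset, 1, &data))
		goto out;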
@@ -1011,8 +1002,7 @@
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
 
 /* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
-						  on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
 
 /* Tx Rate-Scheduler Config fields */
 #define E1000_RTTBCNRC_RS_ENA 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d170f2d..89925e405849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_HW_H_
 #define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
 #include "e1000_mbx.h"
 
 struct e1000_mac_operations {
	s32 (*check_for_link)(struct e1000_hw *);
	s32 (*reset_hw)(struct e1000_hw *);
	s32 (*init_hw)(struct e1000_hw *);
	bool (*check_mng_mode)(struct e1000_hw *);
	s32 (*setup_physical_interface)(struct e1000_hw *);
	void (*rar_set)(struct e1000_hw *, u8 *, u32);
	s32 (*read_mac_addr)(struct e1000_hw *);
	s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
	s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
	void (*release_swfw_sync)(struct e1000_hw *, u16);
 #ifdef CONFIG_IGB_HWMON
	s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
 };
 
 struct e1000_phy_operations {
	s32 (*acquire)(struct e1000_hw *);
	s32 (*check_polarity)(struct e1000_hw *);
	s32 (*check_reset_block)(struct e1000_hw *);
	s32 (*force_speed_duplex)(struct e1000_hw *);
	s32 (*get_cfg_done)(struct e1000_hw *hw);
	s32 (*get_cable_length)(struct e1000_hw *);
	s32 (*get_phy_info)(struct e1000_hw *);
	s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
	void (*release)(struct e1000_hw *);
	s32 (*reset)(struct e1000_hw *);
	s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
	s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
	s32 (*write_reg)(struct e1000_hw *, u32, u16);
	s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
	s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
 };
 
 struct e1000_nvm_operations {
	s32 (*acquire)(struct e1000_hw *);
	s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
	void (*release)(struct e1000_hw *);
	s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
	s32 (*update)(struct e1000_hw *);
	s32 (*validate)(struct e1000_hw *);
	s32 (*valid_led_default)(struct e1000_hw *, u16 *);
 };
 
 #define E1000_MAX_SENSORS 3
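These ops tables are the dispatch point between common igb code and the 82575/82576/i210-family specifics: each family fills in its own function pointers at probe time, and shared code only ever calls through the table. The NVM routines in e1000_i210.c below use exactly this shape; a condensed sketch of the pattern (illustrative, assuming the usual acquire/read/release contract):

	u16 data;
	s32 ret_val;

	/* generic code never names a family-specific function */
	if (!hw->nvm.ops.acquire(hw)) {
		ret_val = hw->nvm.ops.read(hw, NVM_CHECKSUM_REG, 1, &data);
		hw->nvm.ops.release(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}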
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a170b90..337161f440dd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* e1000_i210
  * e1000_i211
@@ -100,7 +97,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
 		return -E1000_ERR_NVM;
 	}
 
-	return E1000_SUCCESS;
+	return 0;
 }
 
 /**
@@ -142,7 +139,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
 	u32 swfw_sync;
 	u32 swmask = mask;
 	u32 fwmask = mask << 16;
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val = 0;
 	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
 
 	while (i < timeout) {
@@ -187,7 +184,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
 {
 	u32 swfw_sync;
 
-	while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+	while (igb_get_hw_semaphore_i210(hw))
 		; /* Empty */
 
 	swfw_sync = rd32(E1000_SW_FW_SYNC);
@@ -210,7 +207,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
 static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
 				  u16 *data)
 {
-	s32 status = E1000_SUCCESS;
+	s32 status = 0;
 	u16 i, count;
 
 	/* We cannot hold synchronization semaphores for too long,
@@ -220,7 +217,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
-		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+		if (!(hw->nvm.ops.acquire(hw))) {
 			status = igb_read_nvm_eerd(hw, offset, count,
 						   data + i);
 			hw->nvm.ops.release(hw);
@@ -228,7 +225,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
 			status = E1000_ERR_SWFW_SYNC;
 		}
 
-		if (status != E1000_SUCCESS)
+		if (status)
 			break;
 	}
 
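Two details are worth noting in igb_read_nvm_srrd_i210() as converted: the hardware semaphore is deliberately acquired and released per chunk (the function's own comment explains it cannot be held across the whole transfer), and the count computation is a clamped minimum written the long way. An equivalent, arguably clearer form would be min_t() (hypothetical rewrite, not part of this patch):

	count = min_t(u16, words - i, E1000_EERD_EEWR_MAX_COUNT);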
@@ -253,7 +250,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
 	struct e1000_nvm_info *nvm = &hw->nvm;
 	u32 i, k, eewr = 0;
 	u32 attempts = 100000;
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val = 0;
 
 	/* A check for invalid values: offset too large, too many words,
 	 * too many words for the offset, and not enough words.
@@ -275,13 +272,13 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
 		for (k = 0; k < attempts; k++) {
 			if (E1000_NVM_RW_REG_DONE &
 			    rd32(E1000_SRWR)) {
-				ret_val = E1000_SUCCESS;
+				ret_val = 0;
 				break;
 			}
 			udelay(5);
 		}
 
-		if (ret_val != E1000_SUCCESS) {
+		if (ret_val) {
 			hw_dbg("Shadow RAM write EEWR timed out\n");
 			break;
 		}
@@ -310,7 +307,7 @@ out:
 static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
 				   u16 *data)
 {
-	s32 status = E1000_SUCCESS;
+	s32 status = 0;
 	u16 i, count;
 
 	/* We cannot hold synchronization semaphores for too long,
@@ -320,7 +317,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
-		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+		if (!(hw->nvm.ops.acquire(hw))) {
 			status = igb_write_nvm_srwr(hw, offset, count,
 						    data + i);
 			hw->nvm.ops.release(hw);
@@ -328,7 +325,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
 			status = E1000_ERR_SWFW_SYNC;
 		}
 
-		if (status != E1000_SUCCESS)
+		if (status)
 			break;
 	}
 
@@ -367,12 +364,12 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
 			*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
 			hw_dbg("Read INVM Word 0x%02x = %x\n",
 			       address, *data);
-			status = E1000_SUCCESS;
+			status = 0;
 			break;
 		}
 	}
 	}
-	if (status != E1000_SUCCESS)
+	if (status)
 		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
 	return status;
 }
@@ -388,7 +385,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
 static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
 			      u16 words __always_unused, u16 *data)
 {
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val = 0;
 
 	/* Only the MAC addr is required to be present in the iNVM */
 	switch (offset) {
@@ -398,43 +395,44 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
 						     &data[1]);
 		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
 						     &data[2]);
-		if (ret_val != E1000_SUCCESS)
+		if (ret_val)
 			hw_dbg("MAC Addr not found in iNVM\n");
 		break;
 	case NVM_INIT_CTRL_2:
 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-		if (ret_val != E1000_SUCCESS) {
+		if (ret_val) {
 			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
-			ret_val = E1000_SUCCESS;
+			ret_val = 0;
 		}
 		break;
 	case NVM_INIT_CTRL_4:
 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-		if (ret_val != E1000_SUCCESS) {
+		if (ret_val) {
 			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
-			ret_val = E1000_SUCCESS;
+			ret_val = 0;
 		}
 		break;
 	case NVM_LED_1_CFG:
 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-		if (ret_val != E1000_SUCCESS) {
+		if (ret_val) {
 			*data = NVM_LED_1_CFG_DEFAULT_I211;
-			ret_val = E1000_SUCCESS;
+			ret_val = 0;
 		}
 		break;
 	case NVM_LED_0_2_CFG:
 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-		if (ret_val != E1000_SUCCESS) {
+		if (ret_val) {
 			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
-			ret_val = E1000_SUCCESS;
+			ret_val = 0;
 		}
 		break;
 	case NVM_ID_LED_SETTINGS:
 		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
-		if (ret_val != E1000_SUCCESS) {
+		if (ret_val) {
 			*data = ID_LED_RESERVED_FFFF;
-			ret_val = E1000_SUCCESS;
+			ret_val = 0;
 		}
+		break;
 	case NVM_SUB_DEV_ID:
 		*data = hw->subsystem_device_id;
 		break;
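The lone added break; in the hunk above is a genuine bug fix, not conversion churn: without it, the NVM_ID_LED_SETTINGS case fell straight through into NVM_SUB_DEV_ID, and the subsystem device ID overwrote the LED default that had just been stored. The hazard in miniature (illustrative sketch):

	switch (offset) {
	case NVM_ID_LED_SETTINGS:
		*data = ID_LED_RESERVED_FFFF;
		/* no break: falls through and clobbers *data below */
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	}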
@@ -488,14 +486,14 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
 		/* Check if we have first version location used */
 		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
 			version = 0;
-			status = E1000_SUCCESS;
+			status = 0;
 			break;
 		}
 		/* Check if we have second version location used */
 		else if ((i == 1) &&
 			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
 			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
-			status = E1000_SUCCESS;
+			status = 0;
 			break;
 		}
 		/* Check if we have odd version location
@@ -506,7 +504,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
 			 (i != 1))) {
 			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
 				  >> 13;
-			status = E1000_SUCCESS;
+			status = 0;
 			break;
 		}
 		/* Check if we have even version location
@@ -515,12 +513,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
 		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
 			 ((*record & 0x3) == 0)) {
 			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
-			status = E1000_SUCCESS;
+			status = 0;
 			break;
 		}
 	}
 
-	if (status == E1000_SUCCESS) {
+	if (!status) {
 		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
 					>> E1000_INVM_MAJOR_SHIFT;
 		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
@@ -533,7 +531,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
 		/* Check if we have image type in first location used */
 		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
 			invm_ver->invm_img_type = 0;
-			status = E1000_SUCCESS;
+			status = 0;
 			break;
 		}
 		/* Check if we have image type in first location used */
@@ -542,7 +540,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
 			 ((((*record & 0x3) != 0) && (i != 1)))) {
 			invm_ver->invm_img_type =
 				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
-			status = E1000_SUCCESS;
+			status = 0;
 			break;
 		}
 	}
@@ -558,10 +556,10 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
  **/
 static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
 {
-	s32 status = E1000_SUCCESS;
+	s32 status = 0;
 	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
 
-	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+	if (!(hw->nvm.ops.acquire(hw))) {
 
 		/* Replace the read function with semaphore grabbing with
 		 * the one that skips this for a while.
@@ -593,7 +591,7 @@ static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
  **/
 static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
 {
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val = 0;
 	u16 checksum = 0;
 	u16 i, nvm_data;
 
@@ -602,12 +600,12 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
 	 * EEPROM read fails
 	 */
 	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
-	if (ret_val != E1000_SUCCESS) {
+	if (ret_val) {
 		hw_dbg("EEPROM read failed\n");
 		goto out;
 	}
 
-	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+	if (!(hw->nvm.ops.acquire(hw))) {
 		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
 		 * because we do not want to take the synchronization
 		 * semaphores twice here.
@@ -625,7 +623,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
 		checksum = (u16) NVM_SUM - checksum;
 		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
 					     &checksum);
-		if (ret_val != E1000_SUCCESS) {
+		if (ret_val) {
 			hw->nvm.ops.release(hw);
 			hw_dbg("NVM Write Error while updating checksum.\n");
 			goto out;
@@ -654,7 +652,7 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
 	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
 		reg = rd32(E1000_EECD);
 		if (reg & E1000_EECD_FLUDONE_I210) {
-			ret_val = E1000_SUCCESS;
+			ret_val = 0;
 			break;
 		}
 		udelay(5);
@@ -687,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
  **/
 static s32 igb_update_flash_i210(struct e1000_hw *hw)
 {
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val = 0;
 	u32 flup;
 
 	ret_val = igb_pool_flash_update_done_i210(hw);
@@ -700,7 +698,7 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
 	wr32(E1000_EECD, flup);
 
 	ret_val = igb_pool_flash_update_done_i210(hw);
-	if (ret_val == E1000_SUCCESS)
+	if (ret_val)
 		hw_dbg("Flash update complete\n");
 	else
 		hw_dbg("Flash update time out\n");
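One polarity caveat as this hunk reads: under the 0-on-success convention, if (ret_val) now selects the *failure* path, so the two hw_dbg() strings above end up attached to the opposite outcomes. A conversion consistent with the rest of the series would look like this (hypothetical sketch, not a hunk from this patch):

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");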
@@ -753,7 +751,7 @@ out:
 static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
 				  u8 dev_addr, u16 *data, bool read)
 {
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val = 0;
 
 	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
 	if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99a9813..9f34976687ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404db81a..2a88595f956c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
  *  The caller must have a packed mc_addr_list of multicast addresses.
  **/
 void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
 {
 	u32 hash_value, hash_bit, hash_reg;
 	int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 			goto out;
 
 		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-			hw_dbg("Copper PHY and Auto Neg "
-			       "has not completed.\n");
+			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
 			goto out;
 		}
 
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
 	while (i < AUTO_READ_DONE_TIMEOUT) {
 		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
 			break;
-		msleep(1);
+		usleep_range(1000, 2000);
 		i++;
 	}
 
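The msleep(1) to usleep_range(1000, 2000) conversions here and in e1000_phy.c below follow the guidance in Documentation/timers/timers-howto.txt: msleep() rounds up to jiffies, so msleep(1) can sleep on the order of 20 ms on a HZ=100 kernel, while usleep_range() is hrtimer-based and gives the scheduler an explicit window in which to coalesce wakeups. The shape of the change:

	/* before: can oversleep badly at low HZ */
	msleep(1);

	/* after: sleep at least 1000 us, at most 2000 us */
	usleep_range(1000, 2000);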
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
 	}
 
 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
-		switch(hw->phy.media_type) {
+		switch (hw->phy.media_type) {
 		case e1000_media_type_internal_serdes:
 			*data = ID_LED_DEFAULT_82575_SERDES;
 			break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba8ee3a..ea24961b0d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MAC_H_
 #define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b121771c31..162cc49345d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000_mbx.h"
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f5515e5a8..d20af6b2f581 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MBX_H_
 #define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf82919c65..e8280d0d7f02 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 		/* Loop to allow for up to whole page write of eeprom */
 		while (widx < words) {
 			u16 word_out = data[widx];
+
 			word_out = (word_out >> 8) | (word_out << 8);
 			igb_shift_out_eec_bits(hw, word_out, 16);
 			widx++;
@@ -801,5 +798,4 @@ etrack_id:
 		fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
 			| eeprom_verl;
 	}
-	return;
 }
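Both e1000_nvm.c hunks are checkpatch.pl style fixes with no behavioral effect: the first adds the blank line checkpatch asks for after a local declaration, and the second drops a bare return at the end of a void function ("void function return statements are not generally useful"). In miniature (illustrative sketch, hypothetical function):

	void example(u16 *data)
	{
		u16 word = *data;

		consume(word);	/* blank line above satisfies checkpatch */
	}			/* no trailing "return;" needed */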
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b101170b17e..febc9cdb7391 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_NVM_H_
 #define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
 s32 igb_read_mac_addr(struct e1000_hw *hw);
 s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
 s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
			 u32 part_num_size);
 s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bbab7407..c1bb64d8366f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
924 if (phy->autoneg_wait_to_complete) { 921 if (phy->autoneg_wait_to_complete) {
925 ret_val = igb_wait_autoneg(hw); 922 ret_val = igb_wait_autoneg(hw);
926 if (ret_val) { 923 if (ret_val) {
927 hw_dbg("Error while waiting for " 924 hw_dbg("Error while waiting for autoneg to complete\n");
928 "autoneg to complete\n");
929 goto out; 925 goto out;
930 } 926 }
931 } 927 }
@@ -2208,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 void igb_power_up_phy_copper(struct e1000_hw *hw)
 {
 	u16 mii_reg = 0;
-	u16 power_reg = 0;
 
 	/* The PHY will retain its settings across a power down/up cycle */
 	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
 	mii_reg &= ~MII_CR_POWER_DOWN;
-	if (hw->phy.type == e1000_phy_i210) {
-		hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
-		power_reg &= ~GS40G_CS_POWER_DOWN;
-		hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
-	}
 	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
 }
 
@@ -2231,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
 void igb_power_down_phy_copper(struct e1000_hw *hw)
 {
 	u16 mii_reg = 0;
-	u16 power_reg = 0;
 
 	/* The PHY will retain its settings across a power down/up cycle */
 	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
 	mii_reg |= MII_CR_POWER_DOWN;
-
-	/* i210 Phy requires an additional bit for power up/down */
-	if (hw->phy.type == e1000_phy_i210) {
-		hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
-		power_reg |= GS40G_CS_POWER_DOWN;
-		hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
-	}
 	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-	msleep(1);
+	usleep_range(1000, 2000);
 }
 
 /**
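Note: this series converts short msleep() calls to usleep_range() throughout. Per Documentation/timers/timers-howto.txt, msleep() for small values rounds up to jiffies, so msleep(1) can sleep as long as 20 ms on a HZ=100 kernel; usleep_range() is hrtimer-based and gives the scheduler an explicit window in which to coalesce wakeups. A minimal sketch of the conversion (function name hypothetical):

    #include <linux/delay.h>

    static void example_settle(void)
    {
    	/* old code used msleep(1), which may oversleep by an order
    	 * of magnitude; the range form sleeps 1-2 ms as intended.
    	 */
    	usleep_range(1000, 2000);
    }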
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c46a73..7af4ffab0285 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_PHY_H_
 #define _E1000_PHY_H_
@@ -154,7 +151,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
 #define GS40G_MAC_LB 0x4140
 #define GS40G_MAC_SPEED_1G 0X0006
 #define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
 #define GS40G_LINE_LB 0x4000
 
 /* SFP modules ID memory locations */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e848e1..1cc4b1a7e597 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_REGS_H_
 #define _E1000_REGS_H_
@@ -195,6 +192,10 @@
 			 : (0x0E038 + ((_n) * 0x40)))
 #define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
 			 : (0x0E03C + ((_n) * 0x40)))
+
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
 #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
 #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
 #define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -301,9 +302,9 @@
 #define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
 #define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
 		       (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
 		       (0x054E4 + ((_i - 16) * 8)))
 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -358,8 +359,7 @@
 #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
 #define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
 #define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
-						* Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
 #define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
 
 struct e1000_hw;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 27130065d92a..06102d1f7c03 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* Linux PRO/1000 Ethernet Driver main header file */
 
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
+
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570acbeea8..c737d1f40838 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* ethtool support for igb */
 
@@ -144,6 +141,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
 	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
 	u32 status;
+	u32 speed;
 
 	status = rd32(E1000_STATUS);
 	if (hw->phy.media_type == e1000_media_type_copper) {
@@ -218,13 +216,13 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	if (status & E1000_STATUS_LU) {
 		if ((status & E1000_STATUS_2P5_SKU) &&
 		    !(status & E1000_STATUS_2P5_SKU_OVER)) {
-			ecmd->speed = SPEED_2500;
+			speed = SPEED_2500;
 		} else if (status & E1000_STATUS_SPEED_1000) {
-			ecmd->speed = SPEED_1000;
+			speed = SPEED_1000;
 		} else if (status & E1000_STATUS_SPEED_100) {
-			ecmd->speed = SPEED_100;
+			speed = SPEED_100;
 		} else {
-			ecmd->speed = SPEED_10;
+			speed = SPEED_10;
 		}
 		if ((status & E1000_STATUS_FD) ||
 		    hw->phy.media_type != e1000_media_type_copper)
@@ -232,9 +230,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ecmd->speed = -1;
-		ecmd->duplex = -1;
+		speed = SPEED_UNKNOWN;
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
+	ethtool_cmd_speed_set(ecmd, speed);
 	if ((hw->phy.media_type == e1000_media_type_fiber) ||
 	    hw->mac.autoneg)
 		ecmd->autoneg = AUTONEG_ENABLE;
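Note: the link speed is now staged in a local u32 and written back through ethtool_cmd_speed_set(), with SPEED_UNKNOWN/DUPLEX_UNKNOWN replacing the bare -1. The helper matters because struct ethtool_cmd splits the 32-bit speed across its u16 speed and u16 speed_hi fields, which a direct assignment to ecmd->speed cannot populate. A minimal sketch (function name hypothetical):

    #include <linux/ethtool.h>

    static void example_report_link(struct ethtool_cmd *ecmd, bool link_up)
    {
    	u32 speed = link_up ? SPEED_1000 : SPEED_UNKNOWN;

    	/* stores the value across ecmd->speed and ecmd->speed_hi */
    	ethtool_cmd_speed_set(ecmd, speed);
    	ecmd->duplex = link_up ? DUPLEX_FULL : DUPLEX_UNKNOWN;
    }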
@@ -286,7 +285,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	}
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->mac.autoneg = 1;
@@ -399,7 +398,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
 	adapter->fc_autoneg = pause->autoneg;
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
 		hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +885,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 	}
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1059,8 @@ static struct igb_reg_test reg_test_i350[] = {
 	{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
 	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RA, 0, 16, TABLE64_TEST_LO,
 	  0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1102,8 @@ static struct igb_reg_test reg_test_82580[] = {
 	{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
 	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RA, 0, 16, TABLE64_TEST_LO,
 	  0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1131,10 @@ static struct igb_reg_test reg_test_82576[] = {
 	{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
 	/* Enable all RX queues before testing. */
-	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
-	{ E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+	  E1000_RXDCTL_QUEUE_ENABLE },
+	{ E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+	  E1000_RXDCTL_QUEUE_ENABLE },
 	/* RDH is read-only for 82576, only test RDT. */
 	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1150,14 @@ static struct igb_reg_test reg_test_82576[] = {
 	{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
 	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
 	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
 	{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
 	{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
-	{ E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ 0, 0, 0, 0 }
 };
 
@@ -1170,7 +1171,8 @@ static struct igb_reg_test reg_test_82575[] = {
 	{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
 	/* Enable all four RX queues before testing. */
-	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+	  E1000_RXDCTL_QUEUE_ENABLE },
 	/* RDH is read-only for 82575, only test RDT. */
 	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1198,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 pat, val;
-	static const u32 _test[] =
-		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	static const u32 _test[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
 	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
 		wr32(reg, (_test[pat] & write));
 		val = rd32(reg) & mask;
@@ -1206,11 +1208,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 				"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
 				reg, val, (_test[pat] & write & mask));
 			*data = reg;
-			return 1;
+			return true;
 		}
 	}
 
-	return 0;
+	return false;
 }
 
 static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1220,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 val;
+
 	wr32(reg, write & mask);
 	val = rd32(reg);
 	if ((write & mask) != (val & mask)) {
 		dev_err(&adapter->pdev->dev,
-			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
-			(val & mask), (write & mask));
+			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+			reg, (val & mask), (write & mask));
 		*data = reg;
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
 #define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1390,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	/* Hook up test interrupt handler just for this test */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		if (request_irq(adapter->msix_entries[0].vector,
 				igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
 	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		shared_int = false;
 		if (request_irq(irq,
 				igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
@@ -1412,7 +1415,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	/* Disable all the interrupts */
 	wr32(E1000_IMC, ~0);
 	wrfl();
-	msleep(10);
+	usleep_range(10000, 11000);
 
 	/* Define all writable bits for ICS */
 	switch (hw->mac.type) {
@@ -1459,7 +1462,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		wr32(E1000_IMC, mask);
 		wr32(E1000_ICS, mask);
 		wrfl();
-		msleep(10);
+		usleep_range(10000, 11000);
 
 		if (adapter->test_icr & mask) {
 			*data = 3;
@@ -1481,7 +1484,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		wr32(E1000_IMS, mask);
 		wr32(E1000_ICS, mask);
 		wrfl();
-		msleep(10);
+		usleep_range(10000, 11000);
 
 		if (!(adapter->test_icr & mask)) {
 			*data = 4;
@@ -1503,7 +1506,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		wr32(E1000_IMC, ~mask);
 		wr32(E1000_ICS, ~mask);
 		wrfl();
-		msleep(10);
+		usleep_range(10000, 11000);
 
 		if (adapter->test_icr & mask) {
 			*data = 5;
@@ -1515,7 +1518,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	/* Disable all the interrupts */
 	wr32(E1000_IMC, ~0);
 	wrfl();
-	msleep(10);
+	usleep_range(10000, 11000);
 
 	/* Unhook test interrupt handler */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1664,8 +1667,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
 	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
-	    (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
-
+	    (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+	    (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
 		/* Enable DH89xxCC MPHY for near end loopback */
 		reg = rd32(E1000_MPHY_ADDR_CTL);
 		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
@@ -1949,6 +1952,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
+
 		hw->mac.serdes_has_link = false;
 
 		/* On some blade server designs, link establishment
@@ -2413,9 +2417,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
 	switch (cmd->flow_type) {
 	case TCP_V4_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case UDP_V4_FLOW:
 		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case SCTP_V4_FLOW:
 	case AH_ESP_V4_FLOW:
 	case AH_V4_FLOW:
@@ -2425,9 +2431,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
 		break;
 	case TCP_V6_FLOW:
 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case UDP_V6_FLOW:
 		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* Fall through */
 	case SCTP_V6_FLOW:
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
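Note: the added "/* Fall through */" comments mark the missing breaks as intentional: a TCP flow should also pick up the hash fields added in the cases below it. A condensed sketch of the annotated pattern (helper name and flag hypothetical):

    #include <linux/ethtool.h>

    static u64 example_rss_fields(u32 flow_type, bool udp_l4_hash)
    {
    	u64 data = 0;

    	switch (flow_type) {
    	case TCP_V4_FLOW:
    		data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
    		/* Fall through */
    	case UDP_V4_FLOW:
    		if (udp_l4_hash)
    			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
    		/* Fall through */
    	case SCTP_V4_FLOW:
    		data |= RXH_IP_SRC | RXH_IP_DST;
    		break;
    	}
    	return data;
    }

The comment documents the control flow for readers and keeps static checkers from flagging the case labels as accidental fall-throughs.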
@@ -2730,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 status = E1000_SUCCESS;
+	u32 status = 0;
 	u16 sff8472_rev, addr_mode;
 	bool page_swap = false;
 
@@ -2740,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
 
 	/* Check whether we support SFF-8472 or not */
 	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
-	if (status != E1000_SUCCESS)
+	if (status)
 		return -EIO;
 
 	/* addressing mode is not supported */
 	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
-	if (status != E1000_SUCCESS)
+	if (status)
 		return -EIO;
 
 	/* addressing mode is not supported */
@@ -2772,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 status = E1000_SUCCESS;
+	u32 status = 0;
 	u16 *dataword;
 	u16 first_word, last_word;
 	int i = 0;
@@ -2791,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
 	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
 	for (i = 0; i < last_word - first_word + 1; i++) {
 		status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
-		if (status != E1000_SUCCESS) {
+		if (status) {
 			/* Error occurred while reading module */
 			kfree(dataword);
 			return -EIO;
@@ -2824,7 +2832,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
 	return IGB_RETA_SIZE;
 }
 
-static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int i;
@@ -2870,7 +2878,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
 	}
 }
 
-static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+			const u8 *key)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
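Note: the old .get_rxfh_indir/.set_rxfh_indir hooks are converted here to the consolidated .get_rxfh/.set_rxfh interface, which carries the RSS hash key alongside the indirection table. igb exposes only the table, so the new key argument is simply left alone. A minimal sketch of a table-only implementation (names and sizes hypothetical):

    #include <linux/netdevice.h>

    #define EXAMPLE_RETA_SIZE 128

    static u32 example_reta[EXAMPLE_RETA_SIZE];

    static int example_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
    {
    	int i;

    	if (indir)
    		for (i = 0; i < EXAMPLE_RETA_SIZE; i++)
    			indir[i] = example_reta[i];
    	/* no driver-managed hash key: leave 'key' untouched */
    	return 0;
    }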
@@ -3019,8 +3028,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
 	.get_module_info = igb_get_module_info,
 	.get_module_eeprom = igb_get_module_eeprom,
 	.get_rxfh_indir_size = igb_get_rxfh_indir_size,
-	.get_rxfh_indir = igb_get_rxfh_indir,
-	.set_rxfh_indir = igb_set_rxfh_indir,
+	.get_rxfh = igb_get_rxfh,
+	.set_rxfh = igb_set_rxfh,
 	.get_channels = igb_get_channels,
 	.set_channels = igb_set_channels,
 	.begin = igb_ethtool_begin,
@@ -3029,5 +3038,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
 
 void igb_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+	netdev->ethtool_ops = &igb_ethtool_ops;
 }
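Note: SET_ETHTOOL_OPS() was removed tree-wide in this cycle; the macro was, roughly, a thin wrapper that assigned its second argument to netdev->ethtool_ops, so the open-coded assignment is equivalent. A minimal sketch (function name hypothetical):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static void example_assign_ops(struct net_device *netdev,
    			       const struct ethtool_ops *ops)
    {
    	/* direct assignment, replacing SET_ETHTOOL_OPS(netdev, ops) */
    	netdev->ethtool_ops = ops;
    }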
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67acf96..44b6a68f1af7 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "igb.h"
 #include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8440fa..f145adbb55ac 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
 	[board_82575] = &e1000_82575_info,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
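Note: DEFINE_PCI_DEVICE_TABLE() is deprecated and flagged by checkpatch; the macro hid the table's type, and the open-coded const form is preferred. A minimal sketch of the preferred shape (table name and device ID hypothetical):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id example_pci_tbl[] = {
    	{ PCI_VDEVICE(INTEL, 0x1533) },	/* hypothetical device entry */
    	{ }				/* sentinel terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);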
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
 
-void igb_reset(struct igb_adapter *);
 static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
 						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+			  netdev_features_t features);
 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
 static void igb_restore_vlan(struct igb_adapter *);
@@ -172,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 			       int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
 				   bool setting);
 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
 static void igb_netpoll(struct net_device *);
 #endif
 #ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
-			  "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
 #endif /* CONFIG_PCI_IOV */
 
 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
 	/* Print netdevice Info */
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
-		pr_info("Device Name state trans_start "
-			"last_rx\n");
+		pr_info("Device Name state trans_start last_rx\n");
 		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
 			netdev->state, netdev->trans_start, netdev->last_rx);
 	}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
 	pr_info("------------------------------------\n");
 	pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
 	pr_info("------------------------------------\n");
-	pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
-		"[bi->dma ] leng ntw timestamp "
-		"bi->skb\n");
+	pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
 
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
 		else
 			next_desc = "";
 
-		pr_info("T [0x%03X] %016llX %016llX %016llX"
-			" %04X %p %016llX %p%s\n", i,
-			le64_to_cpu(u0->a),
+		pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+			i, le64_to_cpu(u0->a),
 			le64_to_cpu(u0->b),
 			(u64)dma_unmap_addr(buffer_info, dma),
 			dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
 	pr_info("------------------------------------\n");
 	pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
 	pr_info("------------------------------------\n");
-	pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
-		"[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
-	pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
-		"----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+	pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+	pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
 
 	for (i = 0; i < rx_ring->count; i++) {
 		const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-	return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+	return !!(i2cctl & E1000_I2C_DATA_IN);
 }
 
 /**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-	return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+	return !!(i2cctl & E1000_I2C_CLK_IN);
 }
 
 static const struct i2c_algo_bit_data igb_i2c_algo = {
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 static int __init igb_init_module(void)
 {
 	int ret;
+
 	pr_info("%s - version %s\n",
 		igb_driver_string, igb_driver_version);
-
 	pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 			adapter->rx_ring[i]->reg_idx = rbase_offset +
 						       Q_IDX_82576(i);
 		}
+		/* Fall through */
 	case e1000_82575:
 	case e1000_82580:
 	case e1000_i350:
 	case e1000_i354:
 	case e1000_i210:
 	case e1000_i211:
+		/* Fall through */
 	default:
 		for (; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
 	if (adapter->hw.mac.type >= e1000_82576)
 		set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
 
-	/*
-	 * On i350, i354, i210, and i211, loopback VLAN packets
+	/* On i350, i354, i210, and i211, loopback VLAN packets
 	 * have the tag byte-swapped.
 	 */
 	if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
 	for (; v_idx < q_vectors; v_idx++) {
 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
 		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
 					 tqpv, txr_idx, rqpv, rxr_idx);
 
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	 */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 regval = rd32(E1000_EIAM);
+
 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
 		wr32(E1000_EIMC, adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	wrfl();
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		int i;
+
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			synchronize_irq(adapter->msix_entries[i].vector);
 	} else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
 		u32 regval = rd32(E1000_EIAC);
+
 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
 	/* notify VFs that reset has been completed */
 	if (adapter->vfs_allocated_count) {
 		u32 reg_data = rd32(E1000_CTRL_EXT);
+
 		reg_data |= E1000_CTRL_EXT_PFRSTD;
 		wr32(E1000_CTRL_EXT, reg_data);
 	}
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
 	wr32(E1000_TCTL, tctl);
 	/* flush both disables and wait for them to finish */
 	wrfl();
-	msleep(10);
+	usleep_range(10000, 11000);
 
 	igb_irq_disable(adapter);
 
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 {
 	WARN_ON(in_interrupt());
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 	igb_down(adapter);
 	igb_up(adapter);
 	clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
 	/* disable receive for all VFs and wait one second */
 	if (adapter->vfs_allocated_count) {
 		int i;
+
 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
 			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
 
@@ -2087,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
 	.ndo_set_vf_mac = igb_ndo_set_vf_mac,
 	.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
-	.ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+	.ndo_set_vf_rate = igb_ndo_set_vf_bw,
 	.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
 	.ndo_get_vf_config = igb_ndo_get_vf_config,
 #ifdef CONFIG_NET_POLL_CONTROLLER
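Note: .ndo_set_vf_tx_rate was replaced in this cycle by .ndo_set_vf_rate, which takes both a minimum and a maximum tx rate; that is why the igb_ndo_set_vf_bw prototype earlier in this diff gained a fourth int argument. A minimal sketch of the new hook's shape (names hypothetical; the min-rate rejection mirrors drivers whose hardware only supports a maximum-rate cap):

    #include <linux/errno.h>
    #include <linux/netdevice.h>

    static int example_set_vf_rate(struct net_device *netdev, int vf,
    			       int min_tx_rate, int max_tx_rate)
    {
    	if (min_tx_rate)
    		return -EINVAL;	/* hardware enforces only a maximum */

    	/* program max_tx_rate (in Mbps) into the VF rate limiter here */
    	return 0;
    }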
@@ -2142,7 +2139,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
 		}
 		break;
 	}
-	return;
 }
 
 /**
@@ -2203,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter)
  **/
 static s32 igb_init_i2c(struct igb_adapter *adapter)
 {
-	s32 status = E1000_SUCCESS;
+	s32 status = 0;
 
 	/* I2C interface supported on i350 devices */
 	if (adapter->hw.mac.type != e1000_i350)
-		return E1000_SUCCESS;
+		return 0;
 
 	/* Initialize the i2c bus which is controlled by the registers.
 	 * This bus will use the i2c_algo_bit structue that implements
@@ -2437,6 +2433,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* get firmware version for ethtool -i */
 	igb_set_fw_version(adapter);
 
+	/* configure RXPBSIZE and TXPBSIZE */
+	if (hw->mac.type == e1000_i210) {
+		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+	}
+
 	setup_timer(&adapter->watchdog_timer, igb_watchdog,
 		    (unsigned long) adapter);
 	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2529,7 +2531,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	/* let the f/w know that the h/w is now under the control of the
-	 * driver. */
+	 * driver.
+	 */
 	igb_get_hw_control(adapter);
 
 	strcpy(netdev->name, "eth%d");
@@ -3077,6 +3080,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
 	/* notify VFs that reset has been completed */
 	if (adapter->vfs_allocated_count) {
 		u32 reg_data = rd32(E1000_CTRL_EXT);
+
 		reg_data |= E1000_CTRL_EXT_PFRSTD;
 		wr32(E1000_CTRL_EXT, reg_data);
 	}
@@ -3248,7 +3252,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
 * Configure a transmit ring after a reset.
 **/
 void igb_configure_tx_ring(struct igb_adapter *adapter,
 			   struct igb_ring *ring)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 txdctl = 0;
@@ -3389,7 +3393,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 
 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
 		for (j = 0; j < IGB_RETA_SIZE; j++)
-			adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+			adapter->rss_indir_tbl[j] =
+			(j * num_rx_queues) / IGB_RETA_SIZE;
 		adapter->rss_indir_tbl_init = num_rx_queues;
 	}
 	igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3435,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3430 if (hw->mac.type > e1000_82575) { 3435 if (hw->mac.type > e1000_82575) {
3431 /* Set the default pool for the PF's first queue */ 3436 /* Set the default pool for the PF's first queue */
3432 u32 vtctl = rd32(E1000_VT_CTL); 3437 u32 vtctl = rd32(E1000_VT_CTL);
3438
3433 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 3439 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3434 E1000_VT_CTL_DISABLE_DEF_POOL); 3440 E1000_VT_CTL_DISABLE_DEF_POOL);
3435 vtctl |= adapter->vfs_allocated_count << 3441 vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3517,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3511} 3517}
3512 3518
3513static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, 3519static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3514 int vfn) 3520 int vfn)
3515{ 3521{
3516 struct e1000_hw *hw = &adapter->hw; 3522 struct e1000_hw *hw = &adapter->hw;
3517 u32 vmolr; 3523 u32 vmolr;
@@ -4058,7 +4064,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
4058 switch (hw->mac.type) { 4064 switch (hw->mac.type) {
4059 case e1000_82576: 4065 case e1000_82576:
4060 case e1000_i350: 4066 case e1000_i350:
4061 if (!(wvbr = rd32(E1000_WVBR))) 4067 wvbr = rd32(E1000_WVBR);
4068 if (!wvbr)
4062 return; 4069 return;
4063 break; 4070 break;
4064 default: 4071 default:
@@ -4077,7 +4084,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
4077 if (!adapter->wvbr) 4084 if (!adapter->wvbr)
4078 return; 4085 return;
4079 4086
4080 for(j = 0; j < adapter->vfs_allocated_count; j++) { 4087 for (j = 0; j < adapter->vfs_allocated_count; j++) {
4081 if (adapter->wvbr & (1 << j) || 4088 if (adapter->wvbr & (1 << j) ||
4082 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { 4089 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
4083 dev_warn(&adapter->pdev->dev, 4090 dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4216,15 @@ static void igb_watchdog_task(struct work_struct *work)
4209 4216
4210 if (!netif_carrier_ok(netdev)) { 4217 if (!netif_carrier_ok(netdev)) {
4211 u32 ctrl; 4218 u32 ctrl;
4219
4212 hw->mac.ops.get_speed_and_duplex(hw, 4220 hw->mac.ops.get_speed_and_duplex(hw,
4213 &adapter->link_speed, 4221 &adapter->link_speed,
4214 &adapter->link_duplex); 4222 &adapter->link_duplex);
4215 4223
4216 ctrl = rd32(E1000_CTRL); 4224 ctrl = rd32(E1000_CTRL);
4217 /* Links status message must follow this format */ 4225 /* Links status message must follow this format */
4218 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s " 4226 netdev_info(netdev,
4219 "Duplex, Flow Control: %s\n", 4227 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4220 netdev->name, 4228 netdev->name,
4221 adapter->link_speed, 4229 adapter->link_speed,
4222 adapter->link_duplex == FULL_DUPLEX ? 4230 adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4250,8 @@ static void igb_watchdog_task(struct work_struct *work)
4242 4250
4243 /* check for thermal sensor event */ 4251 /* check for thermal sensor event */
4244 if (igb_thermal_sensor_event(hw, 4252 if (igb_thermal_sensor_event(hw,
4245 E1000_THSTAT_LINK_THROTTLE)) { 4253 E1000_THSTAT_LINK_THROTTLE))
4246 netdev_info(netdev, "The network adapter link " 4254 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
4247 "speed was downshifted because it "
4248 "overheated\n");
4249 }
4250 4255
4251 /* adjust timeout factor according to speed/duplex */ 4256 /* adjust timeout factor according to speed/duplex */
4252 adapter->tx_timeout_factor = 1; 4257 adapter->tx_timeout_factor = 1;
@@ -4277,12 +4282,11 @@ static void igb_watchdog_task(struct work_struct *work)
4277 /* check for thermal sensor event */ 4282 /* check for thermal sensor event */
4278 if (igb_thermal_sensor_event(hw, 4283 if (igb_thermal_sensor_event(hw,
4279 E1000_THSTAT_PWR_DOWN)) { 4284 E1000_THSTAT_PWR_DOWN)) {
4280 netdev_err(netdev, "The network adapter was " 4285 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
4281 "stopped because it overheated\n");
4282 } 4286 }
4283 4287
4284 /* Links status message must follow this format */ 4288 /* Links status message must follow this format */
4285 printk(KERN_INFO "igb: %s NIC Link is Down\n", 4289 netdev_info(netdev, "igb: %s NIC Link is Down\n",
4286 netdev->name); 4290 netdev->name);
4287 netif_carrier_off(netdev); 4291 netif_carrier_off(netdev);
4288 4292
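The watchdog hunks above swap printk(KERN_INFO ...) for netdev_info() and unwrap the message strings. The netdev_* helpers prefix the driver and device name automatically, and keeping a user-visible string on one line, even past 80 columns, keeps it greppable. A sketch of the convention, with a hypothetical function that is not part of the patch:

#include <linux/netdevice.h>

/* Hypothetical link report using the netdev_* logging helpers. */
static void example_report_link(struct net_device *netdev, int mbps)
{
	/* one unbroken format string, so grep for "Link is Up" finds it */
	netdev_info(netdev, "NIC Link is Up %d Mbps\n", mbps);
}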
@@ -4344,6 +4348,7 @@ static void igb_watchdog_task(struct work_struct *work)
 	/* Cause software interrupt to ensure Rx ring is cleaned */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 eics = 0;
+
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			eics |= adapter->q_vector[i]->eims_value;
 		wr32(E1000_EICS, eics);
@@ -4483,13 +4488,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
 	case low_latency:  /* 50 usec aka 20000 ints/s */
 		if (bytes > 10000) {
 			/* this if handles the TSO accounting */
-			if (bytes/packets > 8000) {
+			if (bytes/packets > 8000)
 				itrval = bulk_latency;
-			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
+			else if ((packets < 10) || ((bytes/packets) > 1200))
 				itrval = bulk_latency;
-			} else if ((packets > 35)) {
+			else if ((packets > 35))
 				itrval = lowest_latency;
-			}
 		} else if (bytes/packets > 2000) {
 			itrval = bulk_latency;
 		} else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4679,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 		return;
 	} else {
 		u8 l4_hdr = 0;
+
 		switch (first->protocol) {
 		case htons(ETH_P_IP):
 			vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4967,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	 */
 	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
 		unsigned short f;
+
 		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 	} else {
@@ -5140,7 +5146,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
 
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	/* igb_down has a dependency on max_frame_size */
 	adapter->max_frame_size = max_frame;
@@ -5621,6 +5627,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 		vmolr |= E1000_VMOLR_MPME;
 	} else if (vf_data->num_vf_mc_hashes) {
 		int j;
+
 		vmolr |= E1000_VMOLR_ROMPE;
 		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
 			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5679,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
 
 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
 		u32 vmolr = rd32(E1000_VMOLR(i));
+
 		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 
 		vf_data = &adapter->vf_data[i];
@@ -5770,6 +5778,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 
 		if (!adapter->vf_data[vf].vlans_enabled) {
 			u32 size;
+
 			reg = rd32(E1000_VMOLR(vf));
 			size = reg & E1000_VMOLR_RLPML_MASK;
 			size += 4;
@@ -5798,6 +5807,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 			adapter->vf_data[vf].vlans_enabled--;
 			if (!adapter->vf_data[vf].vlans_enabled) {
 				u32 size;
+
 				reg = rd32(E1000_VMOLR(vf));
 				size = reg & E1000_VMOLR_RLPML_MASK;
 				size -= 4;
@@ -5902,8 +5912,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 	 */
 	if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
 		u32 vlvf, bits;
-
 		int regndx = igb_find_vlvf_entry(adapter, vid);
+
 		if (regndx < 0)
 			goto out;
 		/* See if any other pools are set for this VLAN filter
@@ -6494,7 +6504,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
 	/* transfer page from old buffer to new buffer */
-	memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+	*new_buff = *old_buff;
 
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
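Replacing the memcpy() with *new_buff = *old_buff is behavior-preserving for a fixed-size struct: a structure assignment copies the same bytes but lets the compiler type-check both sides and removes the chance of a wrong sizeof. A sketch with a hypothetical buffer type shaped like igb_rx_buffer:

#include <linux/types.h>

/* Hypothetical mirror of the Rx buffer bookkeeping struct. */
struct example_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

static void example_reuse(struct example_rx_buffer *new_buff,
			  const struct example_rx_buffer *old_buff)
{
	*new_buff = *old_buff;	/* type-checked; compiles like the memcpy() */
}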
@@ -6963,6 +6973,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
 		u16 vid;
+
 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
 			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7062,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	if (cleaned_count)
 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-	return (total_packets < budget);
+	return total_packets < budget;
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7183,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 		break;
 	case SIOCGMIIREG:
 		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
-		                     &data->val_out))
+				     &data->val_out))
 			return -EIO;
 		break;
 	case SIOCSMIIREG:
@@ -7873,7 +7884,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
 	}
 }
 
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+			     int min_tx_rate, int max_tx_rate)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -7882,15 +7894,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 	if (hw->mac.type != e1000_82576)
 		return -EOPNOTSUPP;
 
+	if (min_tx_rate)
+		return -EINVAL;
+
 	actual_link_speed = igb_link_mbps(adapter->link_speed);
 	if ((vf >= adapter->vfs_allocated_count) ||
 	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
-	    (tx_rate < 0) || (tx_rate > actual_link_speed))
+	    (max_tx_rate < 0) ||
+	    (max_tx_rate > actual_link_speed))
 		return -EINVAL;
 
 	adapter->vf_rate_link_speed = actual_link_speed;
-	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
-	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
 
 	return 0;
 }
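This hunk tracks a net-next API change in this merge window: .ndo_set_vf_tx_rate (a single cap) became .ndo_set_vf_rate with separate min_tx_rate and max_tx_rate arguments. Hardware such as the 82576 can only cap a VF, so any nonzero minimum is rejected and only the maximum is programmed. A hedged sketch of a conforming handler, with hypothetical names:

#include <linux/netdevice.h>

/* Hypothetical handler for the two-rate VF bandwidth callback. */
static int example_ndo_set_vf_rate(struct net_device *netdev, int vf,
				   int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate)		/* no guaranteed-minimum support */
		return -EINVAL;
	if (max_tx_rate < 0)
		return -EINVAL;
	/* ... program the per-VF rate limiter with max_tx_rate ... */
	return 0;
}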
@@ -7919,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
 	wr32(reg_offset, reg_val);
 
 	adapter->vf_data[vf].spoofchk_enabled = setting;
-	return E1000_SUCCESS;
+	return 0;
 }
 
 static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7930,7 +7946,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
 		return -EINVAL;
 	ivi->vf = vf;
 	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+	ivi->min_tx_rate = 0;
 	ivi->vlan = adapter->vf_data[vf].pf_vlan;
 	ivi->qos = adapter->vf_data[vf].pf_qos;
 	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7955,11 +7972,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
 		reg = rd32(E1000_DTXCTL);
 		reg |= E1000_DTXCTL_VLAN_ADDED;
 		wr32(E1000_DTXCTL, reg);
+		/* Fall through */
 	case e1000_82580:
 		/* enable replication vlan tag stripping */
 		reg = rd32(E1000_RPLOLR);
 		reg |= E1000_RPLOLR_STRVLAN;
 		wr32(E1000_RPLOLR, reg);
+		/* Fall through */
 	case e1000_i350:
 		/* none of the above registers are supported by i350 */
 		break;
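The two added /* Fall through */ comments only document what the code already did: each case in igb_vmm_control() is cumulative, so 82576 runs its own setup and then deliberately continues into the 82580 and i350 cases. The comment also quietens static analyzers that flag a case without a break. The idiom, sketched with a hypothetical enum:

enum example_mac { MAC_OLD, MAC_MID, MAC_NEW };

static void example_enable_features(enum example_mac mac)
{
	switch (mac) {
	case MAC_OLD:
		/* ... extras only the oldest part needs ... */
		/* Fall through */
	case MAC_MID:
		/* ... extras that MAC_OLD and MAC_MID share ... */
		/* Fall through */
	case MAC_NEW:
		break;	/* nothing extra for the newest part */
	}
}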
@@ -8049,6 +8068,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
 		} /* endif adapter->dmac is not disabled */
 	} else if (hw->mac.type == e1000_82580) {
 		u32 reg = rd32(E1000_PCIEMISC);
+
 		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
 		wr32(E1000_DMACR, 0);
 	}
@@ -8077,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 
 	swfw_mask = E1000_SWFW_PHY0_SM;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
-	    != E1000_SUCCESS)
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
 		return E1000_ERR_SWFW_SYNC;
 
 	status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -8088,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 		return E1000_ERR_I2C;
 	else {
 		*data = status;
-		return E1000_SUCCESS;
+		return 0;
 	}
 }
 
@@ -8113,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 	if (!this_client)
 		return E1000_ERR_I2C;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
 		return E1000_ERR_SWFW_SYNC;
 	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -8121,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 	if (status)
 		return E1000_ERR_I2C;
 	else
-		return E1000_SUCCESS;
+		return 0;
 
 }
 
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab25e49365f7..794c139f0cc0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -360,8 +360,8 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
 	return 0;
 }
 
-static int igb_ptp_enable(struct ptp_clock_info *ptp,
-                          struct ptp_clock_request *rq, int on)
+static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
+				  struct ptp_clock_request *rq, int on)
 {
 	return -EOPNOTSUPP;
 }
@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
 		-EFAULT : 0;
 }
+
 /**
- * igb_ptp_set_ts_config - control hardware time stamping
- * @netdev:
- * @ifreq:
+ * igb_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
  *
  * Outgoing time stamping can be enabled and disabled. Play nice and
  * disable it when requested, although it shouldn't case any overhead
@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
  * type has to be specified. Matching the kind of event packet is
  * not supported, with the exception of "all V2 events regardless of
  * level 2 or 4".
- **/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+ */
+static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+				      struct hwtstamp_config *config)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct hwtstamp_config *config = &adapter->tstamp_config;
 	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
 	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
 	u32 tsync_rx_cfg = 0;
@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
 	bool is_l2 = false;
 	u32 regval;
 
-	if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
-		return -EFAULT;
-
 	/* reserved for future extensions */
 	if (config->flags)
 		return -EINVAL;
@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
 	regval = rd32(E1000_RXSTMPL);
 	regval = rd32(E1000_RXSTMPH);
 
-	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+	return 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - set hardware time stamping config
+ * @netdev:
+ * @ifreq:
+ *
+ **/
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct hwtstamp_config config;
+	int err;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	err = igb_ptp_set_timestamp_mode(adapter, &config);
+	if (err)
+		return err;
+
+	/* save these settings for future reference */
+	memcpy(&adapter->tstamp_config, &config,
+	       sizeof(adapter->tstamp_config));
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 		-EFAULT : 0;
 }
 
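The refactor above splits the SIOCSHWTSTAMP path into a pure mode-setting helper plus a thin ioctl wrapper: copy the request in, try to apply it (the helper may adjust the config to what the hardware actually does), cache the result so SIOCGHWTSTAMP and igb_ptp_reset() can replay it, and echo it back to user space. The same shape, sketched with hypothetical names:

#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/uaccess.h>

struct example_adapter {
	struct hwtstamp_config saved_config;
	/* ... */
};

int example_apply_timestamp_mode(struct example_adapter *ad,
				 struct hwtstamp_config *config);

/* Hypothetical ioctl wrapper: validate, apply, save, echo back. */
static int example_set_ts_config(struct example_adapter *ad, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = example_apply_timestamp_mode(ad, &config);
	if (err)
		return err;

	ad->saved_config = config;	/* replayed after a device reset */

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}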
@@ -745,7 +768,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
 		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
 		adapter->ptp_caps.settime = igb_ptp_settime_82576;
-		adapter->ptp_caps.enable = igb_ptp_enable;
+		adapter->ptp_caps.enable = igb_ptp_feature_enable;
 		adapter->cc.read = igb_ptp_read_82576;
 		adapter->cc.mask = CLOCKSOURCE_MASK(64);
 		adapter->cc.mult = 1;
@@ -765,7 +788,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
 		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
 		adapter->ptp_caps.settime = igb_ptp_settime_82576;
-		adapter->ptp_caps.enable = igb_ptp_enable;
+		adapter->ptp_caps.enable = igb_ptp_feature_enable;
 		adapter->cc.read = igb_ptp_read_82580;
 		adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
 		adapter->cc.mult = 1;
@@ -784,7 +807,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
 		adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
 		adapter->ptp_caps.settime = igb_ptp_settime_i210;
-		adapter->ptp_caps.enable = igb_ptp_enable;
+		adapter->ptp_caps.enable = igb_ptp_feature_enable;
 		/* Enable the timer functions by clearing bit 31. */
 		wr32(E1000_TSAUXC, 0x0);
 		break;
@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		wr32(E1000_IMS, E1000_IMS_TS);
 	}
 
+	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
 						&adapter->pdev->dev);
 	if (IS_ERR(adapter->ptp_clock)) {
@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
 		return;
 
 	/* reset the tstamp_config */
-	memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+	igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
 
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 90eef07943f4..2178f87e9f61 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -101,8 +101,8 @@ static int igbvf_get_settings(struct net_device *netdev,
 		else
 			ecmd->duplex = DUPLEX_HALF;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = AUTONEG_DISABLE;
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
 static void igbvf_get_pauseparam(struct net_device *netdev,
 				 struct ethtool_pauseparam *pause)
 {
-	return;
 }
 
 static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
 
 void igbvf_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+	netdev->ethtool_ops = &igbvf_ethtool_ops;
 }
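SET_ETHTOOL_OPS() was a one-line macro around exactly this assignment and was removed from net-next during this cycle, so drivers now set netdev->ethtool_ops directly. The pattern, sketched with a hypothetical ops table:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops example_ethtool_ops = {
	/* .get_drvinfo = ..., .get_link = ethtool_op_get_link, ... */
};

static void example_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &example_ethtool_ops;	/* was SET_ETHTOOL_OPS() */
}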
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index dbb7dd2f8e36..b311e9e710d2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -107,8 +107,8 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		ethtool_cmd_speed_set(ecmd, SPEED_10000);
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
 	}
 
 	ecmd->autoneg = AUTONEG_DISABLE;
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
 
 void ixgb_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+	netdev->ethtool_ops = &ixgb_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7d68e6..ac9f2148cdc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
 struct vf_macvlans {
 	struct list_head l;
 	int vf;
-	int rar_entry;
 	bool free;
 	bool is_macvlan;
 	u8 vf_macvlan[ETH_ALEN];
@@ -363,7 +362,7 @@ struct ixgbe_ring_container {
 	for (pos = (head).ring; pos != NULL; pos = pos->next)
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
-                               ? 8 : 1)
+			       ? 8 : 1)
 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
 
 /* MAX_Q_VECTORS of these are allocated,
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
 #define MAX_MSIX_VECTORS_82598 18
 #define MAX_Q_VECTORS_82598 16
 
+struct ixgbe_mac_addr {
+	u8 addr[ETH_ALEN];
+	u16 queue;
+	u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT		0x1
+#define IXGBE_MAC_STATE_MODIFIED	0x2
+#define IXGBE_MAC_STATE_IN_USE		0x4
+
 #define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 
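The new ixgbe_mac_addr array gives the driver a software shadow of the hardware receive-address (RAR) table. The state word is a bitmask: DEFAULT marks the permanent entry for the port's own MAC, IN_USE marks occupied slots, and MODIFIED marks entries that changed since the last write-back, so a sync pass only touches dirty registers. A hedged sketch of such a sync loop, with hypothetical helper names:

#include <linux/types.h>

void example_write_rar(int idx, const u8 *addr, u16 queue);
void example_clear_rar(int idx);

/* Hypothetical sync pass over the software-shadowed filter table. */
static void example_sync_mac_table(struct ixgbe_mac_addr *table, int entries)
{
	int i;

	for (i = 0; i < entries; i++) {
		if (!(table[i].state & IXGBE_MAC_STATE_MODIFIED))
			continue;		/* hardware already current */

		if (table[i].state & IXGBE_MAC_STATE_IN_USE)
			example_write_rar(i, table[i].addr, table[i].queue);
		else
			example_clear_rar(i);

		table[i].state &= ~IXGBE_MAC_STATE_MODIFIED;
	}
}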
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
 
 	u32 timer_event_accumulator;
 	u32 vferr_refcount;
+	struct ixgbe_mac_addr *mac_table;
 	struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
 	struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 			u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+			 u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+			 u8 *addr, u16 queue);
 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
 				  struct ixgbe_ring *);
@@ -941,6 +957,7 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
 }
 
 void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8946c1..15609331ec17 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -41,10 +41,10 @@
 #define IXGBE_82598_RX_PB_SIZE	 512
 
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete);
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete);
 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-                                       u8 *eeprom_data);
+				       u8 *eeprom_data);
 
 /**
  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -140,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
 		phy->ops.get_firmware_version =
-		             &ixgbe_get_phy_firmware_version_tnx;
+			     &ixgbe_get_phy_firmware_version_tnx;
 		break;
 	case ixgbe_phy_nl:
 		phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -156,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 
 	/* Check to see if SFP+ module is supported */
 	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
-	                                              &list_offset,
-	                                              &data_offset);
+						      &list_offset,
+						      &data_offset);
 	if (ret_val != 0) {
 		ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
 		goto out;
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
  *  Determines the link capabilities by reading the AUTOC register.
  **/
 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg)
+					     ixgbe_link_speed *speed,
+					     bool *autoneg)
 {
 	s32 status = 0;
 	u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 	int i;
 	bool link_up;
 
-	/*
-	 * Validate the water mark configuration for packet buffer 0. Zero
-	 * water marks indicate that the packet buffer was not configured
-	 * and the watermarks for packet buffer 0 should always be configured.
-	 */
-	if (!hw->fc.low_water ||
-	    !hw->fc.high_water[0] ||
-	    !hw->fc.pause_time) {
-		hw_dbg(hw, "Invalid water mark configuration\n");
+	/* Validate the water mark configuration */
+	if (!hw->fc.pause_time) {
 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 		goto out;
 	}
 
+	/* Low water mark of zero causes XOFF floods */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				hw_dbg(hw, "Invalid water mark configuration\n");
+				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+				goto out;
+			}
+		}
+	}
+
 	/*
 	 * On 82598 having Rx FC on causes resets while doing 1G
 	 * so if it's on turn it off once we know link_speed. For
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
 		    hw->fc.high_water[i]) {
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
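These two hunks convert 82598 flow control to per-traffic-class water marks: low_water becomes an array like high_water, validation now insists that any TC with Tx pause enabled has a nonzero low mark strictly below its high mark (a zero or inverted low mark would make the MAC emit XOFF continuously), and the FCRTL/FCRTH thresholds are programmed inside the per-TC loop. The check, restated as a standalone sketch:

#include <linux/types.h>

/* Hypothetical check mirroring the per-TC water mark validation. */
static bool example_watermarks_valid(const u32 *low, const u32 *high,
				     int num_tc, bool tx_pause)
{
	int i;

	for (i = 0; i < num_tc; i++) {
		if (!tx_pause || !high[i])
			continue;	/* no Tx pause on this TC */
		if (!low[i] || low[i] >= high[i])
			return false;	/* would XOFF-flood the link */
	}
	return true;
}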
@@ -468,7 +473,7 @@ out:
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete)
+				      bool autoneg_wait_to_complete)
 {
 	u32 autoc_reg;
 	u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
  *  Reads the links register to determine if link is up and the current speed
  **/
 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed, bool *link_up,
-                                      bool link_up_wait_to_complete)
+				      ixgbe_link_speed *speed, bool *link_up,
+				      bool link_up_wait_to_complete)
 {
 	u32 links_reg;
 	u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
 		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
 		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
 		hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
-		                     &adapt_comp_reg);
+				     &adapt_comp_reg);
 		if (link_up_wait_to_complete) {
 			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
 				if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
 				}
 				msleep(100);
 				hw->phy.ops.read_reg(hw, 0xC79F,
-				                     MDIO_MMD_PMAPMD,
-				                     &link_reg);
+						     MDIO_MMD_PMAPMD,
+						     &link_reg);
 				hw->phy.ops.read_reg(hw, 0xC00C,
-				                     MDIO_MMD_PMAPMD,
-				                     &adapt_comp_reg);
+						     MDIO_MMD_PMAPMD,
+						     &adapt_comp_reg);
 			}
 		} else {
 			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
 
 	/* Set KX4/KX support according to speed requested */
 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
-	         link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 			autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
  **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete)
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete)
 {
 	s32 status;
 
 	/* Setup the PHY according to input speed */
 	status = hw->phy.ops.setup_link_speed(hw, speed,
-	                                      autoneg_wait_to_complete);
+					      autoneg_wait_to_complete);
 	/* Set up MAC */
 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
 		/* Enable Tx Atlas so packets can be transmitted again */
 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-		                             &analog_val);
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-		                              analog_val);
+					      analog_val);
 
 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-		                             &analog_val);
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-		                              analog_val);
+					      analog_val);
 
 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-		                             &analog_val);
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-		                              analog_val);
+					      analog_val);
 
 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-		                             &analog_val);
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-		                              analog_val);
+					      analog_val);
 	}
 
 	/* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
 		for (offset = 0; offset < hw->mac.vft_size; offset++)
 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
-			                0);
+					0);
 
 	return 0;
 }
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
 	u32 atlas_ctl;
 
 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
-	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
 	IXGBE_WRITE_FLUSH(hw);
 	udelay(10);
 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
 	/* Setup Tx packet buffer sizes */
 	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
-	return;
 }
 
 static struct ixgbe_mac_operations mac_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index f32b3dd1ba8e..bc7c924240a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -48,17 +48,17 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 					   ixgbe_link_speed speed,
 					   bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-                                           ixgbe_link_speed speed,
-                                           bool autoneg_wait_to_complete);
+					   ixgbe_link_speed speed,
+					   bool autoneg_wait_to_complete);
 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete);
+				      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed speed,
-                                      bool autoneg_wait_to_complete);
+				      ixgbe_link_speed speed,
+				      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete);
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
 				     u8 dev_addr, u8 *data);
@@ -96,9 +96,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
 	    !ixgbe_mng_enabled(hw)) {
 		mac->ops.disable_tx_laser =
-		                          &ixgbe_disable_tx_laser_multispeed_fiber;
+			&ixgbe_disable_tx_laser_multispeed_fiber;
 		mac->ops.enable_tx_laser =
-		                          &ixgbe_enable_tx_laser_multispeed_fiber;
+			&ixgbe_enable_tx_laser_multispeed_fiber;
 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
 	} else {
 		mac->ops.disable_tx_laser = NULL;
@@ -132,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 	hw->phy.ops.reset = NULL;
 
 	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-	                                              &data_offset);
+						      &data_offset);
 	if (ret_val != 0)
 		goto setup_sfp_out;
 
 	/* PHY config will finish before releasing the semaphore */
 	ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-	                                        IXGBE_GSSR_MAC_CSR_SM);
+						IXGBE_GSSR_MAC_CSR_SM);
 	if (ret_val != 0) {
 		ret_val = IXGBE_ERR_SWFW_SYNC;
 		goto setup_sfp_out;
@@ -334,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
 		phy->ops.get_firmware_version =
-		             &ixgbe_get_phy_firmware_version_tnx;
+			     &ixgbe_get_phy_firmware_version_tnx;
 		break;
 	default:
 		break;
@@ -352,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
  *  Determines the link capabilities by reading the AUTOC register.
  **/
 static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg)
+					     ixgbe_link_speed *speed,
+					     bool *autoneg)
 {
 	s32 status = 0;
@@ -543,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete)
+				      bool autoneg_wait_to_complete)
 {
 	u32 autoc_reg;
 	u32 links_reg;
@@ -672,8 +672,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                                 ixgbe_link_speed speed,
-                                                 bool autoneg_wait_to_complete)
+						 ixgbe_link_speed speed,
+						 bool autoneg_wait_to_complete)
 {
 	s32 status = 0;
 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -820,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 	 */
 	if (speedcnt > 1)
 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-		                                               highest_link_speed,
-		                                               autoneg_wait_to_complete);
+							       highest_link_speed,
+							       autoneg_wait_to_complete);
 
 out:
 	/* Set autoneg_advertised value based on input link speed */
@@ -1009,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 			autoc |= IXGBE_AUTOC_KX_SUPP;
 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
-	           (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
-	            link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
 		/* Switch from 1G SFI to 10G SFI if requested */
 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1018,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
 		}
 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
-	           (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
 		/* Switch from 10G SFI to 1G SFI if requested */
 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1051,7 +1051,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 			}
 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
 				status =
-				        IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+					IXGBE_ERR_AUTONEG_NOT_COMPLETE;
 				hw_dbg(hw, "Autoneg did not complete.\n");
 			}
 		}
@@ -1074,14 +1074,14 @@ out:
  *  Restarts link on PHY and MAC based on settings passed in.
  **/
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete)
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete)
 {
 	s32 status;
 
 	/* Setup the PHY according to input speed */
 	status = hw->phy.ops.setup_link_speed(hw, speed,
-	                                      autoneg_wait_to_complete);
+					      autoneg_wait_to_complete);
 	/* Set up MAC */
 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 
@@ -1224,7 +1224,7 @@ mac_reset_top:
 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
 			autoc2 |= (hw->mac.orig_autoc2 &
-			           IXGBE_AUTOC2_UPPER_MASK);
+				   IXGBE_AUTOC2_UPPER_MASK);
 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
 		}
 	}
@@ -1246,7 +1246,7 @@ mac_reset_top:
 	/* Add the SAN MAC address to the RAR only if it's a valid address */
 	if (is_valid_ether_addr(hw->mac.san_addr)) {
 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-		                    hw->mac.san_addr, 0, IXGBE_RAH_AV);
+				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
 		/* Save the SAN MAC RAR index */
 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1257,7 +1257,7 @@ mac_reset_top:
 
 	/* Store the alternative WWNN/WWPN prefix */
 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-	                           &hw->mac.wwpn_prefix);
+				   &hw->mac.wwpn_prefix);
 
 reset_hw_out:
 	return status;
@@ -1271,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 {
 	int i;
 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
 
 	/*
@@ -1284,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 		udelay(10);
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw, "Flow Director previous command isn't complete, "
-		       "aborting table re-initialization.\n");
+		hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
 
@@ -1299,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 	 * - write 0 to bit 8 of FDIRCMD register
 	 */
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
-	                 IXGBE_FDIRCMD_CLEARHT));
+			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+			 IXGBE_FDIRCMD_CLEARHT));
 	IXGBE_WRITE_FLUSH(hw);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-	                 ~IXGBE_FDIRCMD_CLEARHT));
+			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+			 ~IXGBE_FDIRCMD_CLEARHT));
 	IXGBE_WRITE_FLUSH(hw);
 	/*
 	 * Clear FDIR Hash register to clear any leftover hashes
@@ -1319,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1319 /* Poll init-done after we write FDIRCTRL register */ 1319 /* Poll init-done after we write FDIRCTRL register */
1320 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1320 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1321 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1321 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1322 IXGBE_FDIRCTRL_INIT_DONE) 1322 IXGBE_FDIRCTRL_INIT_DONE)
1323 break; 1323 break;
1324 usleep_range(1000, 2000); 1324 usleep_range(1000, 2000);
1325 } 1325 }
@@ -1368,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1368 IXGBE_WRITE_FLUSH(hw); 1368 IXGBE_WRITE_FLUSH(hw);
1369 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1369 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1370 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1370 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1371 IXGBE_FDIRCTRL_INIT_DONE) 1371 IXGBE_FDIRCTRL_INIT_DONE)
1372 break; 1372 break;
1373 usleep_range(1000, 2000); 1373 usleep_range(1000, 2000);
1374 } 1374 }
@@ -1453,7 +1453,7 @@ do { \
1453 bucket_hash ^= hi_hash_dword >> n; \ 1453 bucket_hash ^= hi_hash_dword >> n; \
1454 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1454 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1455 sig_hash ^= hi_hash_dword << (16 - n); \ 1455 sig_hash ^= hi_hash_dword << (16 - n); \
1456} while (0); 1456} while (0)
1457 1457
1458/** 1458/**
1459 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1459 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
@@ -1529,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1529 * @queue: queue index to direct traffic to 1529 * @queue: queue index to direct traffic to
1530 **/ 1530 **/
1531s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1531s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1532 union ixgbe_atr_hash_dword input, 1532 union ixgbe_atr_hash_dword input,
1533 union ixgbe_atr_hash_dword common, 1533 union ixgbe_atr_hash_dword common,
1534 u8 queue) 1534 u8 queue)
1535{ 1535{
1536 u64 fdirhashcmd; 1536 u64 fdirhashcmd;
1537 u32 fdircmd; 1537 u32 fdircmd;
@@ -1555,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1555 1555
1556 /* configure FDIRCMD register */ 1556 /* configure FDIRCMD register */
1557 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1557 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1558 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1558 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1559 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1559 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1560 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1560 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1561 1561
@@ -1579,7 +1579,7 @@ do { \
1579 bucket_hash ^= lo_hash_dword >> n; \ 1579 bucket_hash ^= lo_hash_dword >> n; \
1580 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1580 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1581 bucket_hash ^= hi_hash_dword >> n; \ 1581 bucket_hash ^= hi_hash_dword >> n; \
1582} while (0); 1582} while (0)
1583 1583
1584/** 1584/**
1585 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash 1585 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1651,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1651static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) 1651static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1652{ 1652{
1653 u32 mask = ntohs(input_mask->formatted.dst_port); 1653 u32 mask = ntohs(input_mask->formatted.dst_port);
1654
1654 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; 1655 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1655 mask |= ntohs(input_mask->formatted.src_port); 1656 mask |= ntohs(input_mask->formatted.src_port);
1656 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); 1657 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1885,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1885 u32 core_ctl; 1886 u32 core_ctl;
1886 1887
1887 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | 1888 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1888 (reg << 8)); 1889 (reg << 8));
1889 IXGBE_WRITE_FLUSH(hw); 1890 IXGBE_WRITE_FLUSH(hw);
1890 udelay(10); 1891 udelay(10);
1891 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); 1892 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7b100d..4e5385a2a465 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -41,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
 					u16 count);
 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -271,6 +271,7 @@ out:
  **/
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
+	s32 ret_val;
 	u32 ctrl_ext;
 
 	/* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Setup flow control */
-	ixgbe_setup_fc(hw);
+	ret_val = ixgbe_setup_fc(hw);
+	if (!ret_val)
+		goto out;
 
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;
 
-	return 0;
+out:
+	return ret_val;
 }
 
 /**
@@ -481,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
  * Reads the part number string from the EEPROM.
  **/
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
 				  u32 pba_num_size)
 {
 	s32 ret_val;
 	u16 data;
@@ -814,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
 			eeprom->address_bits = 16;
 		else
 			eeprom->address_bits = 8;
-		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
-		       "%d\n", eeprom->type, eeprom->word_size,
-		       eeprom->address_bits);
+		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
+		       eeprom->type, eeprom->word_size, eeprom->address_bits);
 	}
 
 	return 0;
@@ -1388,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 	}
 
 	if (i == timeout) {
-		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
-		       "not granted.\n");
+		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
 		/*
 		 * this release is particularly important because our attempts
 		 * above to get the semaphore may have succeeded, and if there
@@ -1434,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 		 * was not granted because we don't have access to the EEPROM
 		 */
 		if (i >= timeout) {
-			hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
-			       "not granted.\n");
+			hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
 			ixgbe_release_eeprom_semaphore(hw);
 			status = IXGBE_ERR_EEPROM;
 		}
 	} else {
-		hw_dbg(hw, "Software semaphore SMBI between device drivers "
-		       "not granted.\n");
+		hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
 	}
 
 	return status;
@@ -1483,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
 	 */
 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
 					    IXGBE_EEPROM_OPCODE_BITS);
 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
 			break;
@@ -1532,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
  * @count: number of bits to shift out
  **/
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
 					u16 count)
 {
 	u32 eec;
 	u32 mask;
@@ -1736,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
  * caller does not need checksum_val, the value can be NULL.
  **/
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
 					   u16 *checksum_val)
 {
 	s32 status;
 	u16 checksum;
@@ -1809,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
  * Puts an ethernet address into a receive address register.
  **/
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 			  u32 enable_addr)
 {
 	u32 rar_low, rar_high;
 	u32 rar_entries = hw->mac.num_rar_entries;
@@ -2053,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
 
 	if (hw->addr_ctrl.mta_in_use > 0)
 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
 
 	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
 	return 0;
@@ -2071,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 
 	if (a->mta_in_use > 0)
 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
 				hw->mac.mc_filter_type);
 
 	return 0;
 }
@@ -2106,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 	u32 fcrtl, fcrth;
 	int i;
 
-	/*
-	 * Validate the water mark configuration for packet buffer 0. Zero
-	 * water marks indicate that the packet buffer was not configured
-	 * and the watermarks for packet buffer 0 should always be configured.
-	 */
-	if (!hw->fc.low_water ||
-	    !hw->fc.high_water[0] ||
-	    !hw->fc.pause_time) {
-		hw_dbg(hw, "Invalid water mark configuration\n");
+	/* Validate the water mark configuration. */
+	if (!hw->fc.pause_time) {
 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 		goto out;
 	}
 
+	/* Low water mark of zero causes XOFF floods */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				hw_dbg(hw, "Invalid water mark configuration\n");
+				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+				goto out;
+			}
+		}
+	}
+
 	/* Negotiate the fc mode to use */
 	ixgbe_fc_autoneg(hw);
 
@@ -2181,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
 		    hw->fc.high_water[i]) {
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
 		} else {
@@ -2654,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
 
 	/* For informational purposes only */
 	if (i >= IXGBE_MAX_SECRX_POLL)
-		hw_dbg(hw, "Rx unit being enabled before security "
-		       "path fully disabled. Continuing with init.\n");
+		hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
 
 	return 0;
 
@@ -2782,7 +2786,7 @@ out:
  * get and set mac_addr routines.
  **/
 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
 					 u16 *san_mac_offset)
 {
 	s32 ret_val;
 
@@ -2828,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
 	hw->mac.ops.set_lan_id(hw);
 	/* apply the port offset to the address offset */
 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
 	for (i = 0; i < 3; i++) {
 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
 					      &san_mac_data);
@@ -3068,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 			   bool vlan_on)
 {
 	s32 regindex;
 	u32 bitindex;
@@ -3190,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 				 * Ignore it. */
 				vfta_changed = false;
 			}
-		}
-		else
+		} else {
 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+		}
 	}
 
 	if (vfta_changed)
@@ -3292,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 * block to check the support for the alternative WWNN/WWPN prefix support.
 **/
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 				 u16 *wwpn_prefix)
 {
 	u16 offset, caps;
 	u16 alt_san_mac_blk_offset;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40fb5537..2ae5d4b8fc93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -39,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
 				  u32 pba_num_size);
 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
 enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
 enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -61,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
 				    u16 words, u16 *data);
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 				       u16 *data);
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 					      u16 words, u16 *data);
 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
 					   u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
 
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 			  u32 enable_addr);
 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -92,13 +92,13 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
 			   u32 vind, bool vlan_on);
 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
 				 ixgbe_link_speed *speed,
 				 bool *link_up, bool link_up_wait_to_complete);
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 				 u16 *wwpn_prefix);
 
 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
 	return unlikely(!addr);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
 static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
 {
 	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
 }
 #define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
 
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
-	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-	u32 value;
-
-	if (ixgbe_removed(reg_addr))
-		return IXGBE_FAILED_READ_REG;
-	value = readl(reg_addr + reg);
-	if (unlikely(value == IXGBE_FAILED_READ_REG))
-		ixgbe_check_remove(hw, reg);
-	return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
 #define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
 
 #define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index e055e000131b..a689ee0d4bed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
 * Configure dcb settings and enable dcb mode.
 */
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
 			struct ixgbe_dcb_config *dcb_config)
 {
 	s32 ret = 0;
 	u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
 	for (i = 0; i < MAX_USER_PRIORITY; i++)
 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
-	return;
 }
 
 void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cbc..d3ba63f9ad37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 			continue;
 		}
 
+		fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 		reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3b0f30..3b932fe64ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 			max_tc = prio_tc[i];
 	}
 
-	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 
 		if (enabled) {
 			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
 		} else {
 			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d5a1e3db0774..90c370230e20 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -31,17 +31,17 @@
 
 /* DCB register definitions */
 #define IXGBE_RTTDCS_TDPAC	0x00000001 /* 0 Round Robin,
 					    * 1 WSP - Weighted Strict Priority
 					    */
 #define IXGBE_RTTDCS_VMPAC	0x00000002 /* 0 Round Robin,
 					    * 1 WRR - Weighted Round Robin
 					    */
 #define IXGBE_RTTDCS_TDRM	0x00000010 /* Transmit Recycle Mode */
 #define IXGBE_RTTDCS_ARBDIS	0x00000040 /* DCB arbiter disable */
 #define IXGBE_RTTDCS_BDPM	0x00400000 /* Bypass Data Pipe - must clear! */
 #define IXGBE_RTTDCS_BPBFSM	0x00800000 /* Bypass PB Free Space - must
 					    * clear!
 					    */
 #define IXGBE_RTTDCS_SPEED_CHG	0x80000000 /* Link speed change */
 
 /* Receive UP2TC mapping */
@@ -56,11 +56,11 @@
 #define IXGBE_RTRPT4C_LSP	0x80000000 /* LSP enable bit */
 
 #define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
 					    * buffers enable
 					    */
 #define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
 					    * (RSS) enable
 					    */
 
 /* RTRPCS Bit Masks */
 #define IXGBE_RTRPCS_RRM	0x00000002 /* Receive Recycle Mode enable */
@@ -81,8 +81,8 @@
 
 /* RTTPCS Bit Masks */
 #define IXGBE_RTTPCS_TPPAC	0x00000020 /* 0 Round Robin,
 					    * 1 SP - Strict Priority
 					    */
 #define IXGBE_RTTPCS_ARBDIS	0x00000040 /* Arbiter disable */
 #define IXGBE_RTTPCS_TPRM	0x00000100 /* Transmit Recycle Mode enable */
 #define IXGBE_RTTPCS_ARBD_SHIFT	22
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index edd89a1ef27f..5172b6b12c09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 					 u8 prio, u8 bwg_id, u8 bw_pct,
 					 u8 up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 					  u8 bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 					 u8 prio, u8 bwg_id, u8 bw_pct,
 					 u8 up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 					  u8 bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 					 u8 *prio, u8 *bwg_id, u8 *bw_pct,
 					 u8 *up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 					  u8 *bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 					 u8 *prio, u8 *bwg_id, u8 *bw_pct,
 					 u8 *up_map)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 					  u8 *bw_pct)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 				    u8 setting)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 				    u8 *setting)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 472b0f450bf9..5e2c1e35e517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -253,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
 **/
 void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
 {
-	if (adapter->ixgbe_dbg_adapter)
-		debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+	debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
 	adapter->ixgbe_dbg_adapter = NULL;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6c55c14d082a..a452730a3278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -141,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
142 / sizeof(u64)) 142 / sizeof(u64))
143#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 143#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
144 IXGBE_PB_STATS_LEN + \ 144 IXGBE_PB_STATS_LEN + \
145 IXGBE_QUEUE_STATS_LEN) 145 IXGBE_QUEUE_STATS_LEN)
146 146
147static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 147static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
148 "Register test (offline)", "Eeprom test (offline)", 148 "Register test (offline)", "Eeprom test (offline)",
@@ -152,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
152#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 152#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
153 153
154static int ixgbe_get_settings(struct net_device *netdev, 154static int ixgbe_get_settings(struct net_device *netdev,
155 struct ethtool_cmd *ecmd) 155 struct ethtool_cmd *ecmd)
156{ 156{
157 struct ixgbe_adapter *adapter = netdev_priv(netdev); 157 struct ixgbe_adapter *adapter = netdev_priv(netdev);
158 struct ixgbe_hw *hw = &adapter->hw; 158 struct ixgbe_hw *hw = &adapter->hw;
@@ -161,13 +161,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
161 bool autoneg = false; 161 bool autoneg = false;
162 bool link_up; 162 bool link_up;
163 163
164 /* SFP type is needed for get_link_capabilities */
165 if (hw->phy.media_type & (ixgbe_media_type_fiber |
166 ixgbe_media_type_fiber_qsfp)) {
167 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
168 hw->phy.ops.identify_sfp(hw);
169 }
170
171 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); 164 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
172 165
173 /* set the supported link speeds */ 166 /* set the supported link speeds */
@@ -303,15 +296,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
303 } 296 }
304 ecmd->duplex = DUPLEX_FULL; 297 ecmd->duplex = DUPLEX_FULL;
305 } else { 298 } else {
306 ethtool_cmd_speed_set(ecmd, -1); 299 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
307 ecmd->duplex = -1; 300 ecmd->duplex = DUPLEX_UNKNOWN;
308 } 301 }
309 302
310 return 0; 303 return 0;
311} 304}
312 305
313static int ixgbe_set_settings(struct net_device *netdev, 306static int ixgbe_set_settings(struct net_device *netdev,
314 struct ethtool_cmd *ecmd) 307 struct ethtool_cmd *ecmd)
315{ 308{
316 struct ixgbe_adapter *adapter = netdev_priv(netdev); 309 struct ixgbe_adapter *adapter = netdev_priv(netdev);
317 struct ixgbe_hw *hw = &adapter->hw; 310 struct ixgbe_hw *hw = &adapter->hw;
@@ -368,7 +361,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
368} 361}
369 362
370static void ixgbe_get_pauseparam(struct net_device *netdev, 363static void ixgbe_get_pauseparam(struct net_device *netdev,
371 struct ethtool_pauseparam *pause) 364 struct ethtool_pauseparam *pause)
372{ 365{
373 struct ixgbe_adapter *adapter = netdev_priv(netdev); 366 struct ixgbe_adapter *adapter = netdev_priv(netdev);
374 struct ixgbe_hw *hw = &adapter->hw; 367 struct ixgbe_hw *hw = &adapter->hw;
@@ -390,7 +383,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
390} 383}
391 384
392static int ixgbe_set_pauseparam(struct net_device *netdev, 385static int ixgbe_set_pauseparam(struct net_device *netdev,
393 struct ethtool_pauseparam *pause) 386 struct ethtool_pauseparam *pause)
394{ 387{
395 struct ixgbe_adapter *adapter = netdev_priv(netdev); 388 struct ixgbe_adapter *adapter = netdev_priv(netdev);
396 struct ixgbe_hw *hw = &adapter->hw; 389 struct ixgbe_hw *hw = &adapter->hw;
@@ -450,7 +443,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
450#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 443#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
451 444
452static void ixgbe_get_regs(struct net_device *netdev, 445static void ixgbe_get_regs(struct net_device *netdev,
453 struct ethtool_regs *regs, void *p) 446 struct ethtool_regs *regs, void *p)
454{ 447{
455 struct ixgbe_adapter *adapter = netdev_priv(netdev); 448 struct ixgbe_adapter *adapter = netdev_priv(netdev);
456 struct ixgbe_hw *hw = &adapter->hw; 449 struct ixgbe_hw *hw = &adapter->hw;
@@ -812,7 +805,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
812} 805}
813 806
814static int ixgbe_get_eeprom(struct net_device *netdev, 807static int ixgbe_get_eeprom(struct net_device *netdev,
815 struct ethtool_eeprom *eeprom, u8 *bytes) 808 struct ethtool_eeprom *eeprom, u8 *bytes)
816{ 809{
817 struct ixgbe_adapter *adapter = netdev_priv(netdev); 810 struct ixgbe_adapter *adapter = netdev_priv(netdev);
818 struct ixgbe_hw *hw = &adapter->hw; 811 struct ixgbe_hw *hw = &adapter->hw;
@@ -918,7 +911,7 @@ err:
918} 911}
919 912
920static void ixgbe_get_drvinfo(struct net_device *netdev, 913static void ixgbe_get_drvinfo(struct net_device *netdev,
921 struct ethtool_drvinfo *drvinfo) 914 struct ethtool_drvinfo *drvinfo)
922{ 915{
923 struct ixgbe_adapter *adapter = netdev_priv(netdev); 916 struct ixgbe_adapter *adapter = netdev_priv(netdev);
924 u32 nvm_track_id; 917 u32 nvm_track_id;
@@ -940,7 +933,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
940} 933}
941 934
942static void ixgbe_get_ringparam(struct net_device *netdev, 935static void ixgbe_get_ringparam(struct net_device *netdev,
943 struct ethtool_ringparam *ring) 936 struct ethtool_ringparam *ring)
944{ 937{
945 struct ixgbe_adapter *adapter = netdev_priv(netdev); 938 struct ixgbe_adapter *adapter = netdev_priv(netdev);
946 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 939 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -953,7 +946,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
953} 946}
954 947
955static int ixgbe_set_ringparam(struct net_device *netdev, 948static int ixgbe_set_ringparam(struct net_device *netdev,
956 struct ethtool_ringparam *ring) 949 struct ethtool_ringparam *ring)
957{ 950{
958 struct ixgbe_adapter *adapter = netdev_priv(netdev); 951 struct ixgbe_adapter *adapter = netdev_priv(netdev);
959 struct ixgbe_ring *temp_ring; 952 struct ixgbe_ring *temp_ring;
@@ -1082,7 +1075,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1082} 1075}
1083 1076
1084static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1077static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1085 struct ethtool_stats *stats, u64 *data) 1078 struct ethtool_stats *stats, u64 *data)
1086{ 1079{
1087 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1080 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1088 struct rtnl_link_stats64 temp; 1081 struct rtnl_link_stats64 temp;
@@ -1110,7 +1103,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1110 } 1103 }
1111 1104
1112 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1105 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1113 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1106 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1114 } 1107 }
1115 for (j = 0; j < netdev->num_tx_queues; j++) { 1108 for (j = 0; j < netdev->num_tx_queues; j++) {
1116 ring = adapter->tx_ring[j]; 1109 ring = adapter->tx_ring[j];
@@ -1180,7 +1173,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1180} 1173}
1181 1174
1182static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1175static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1183 u8 *data) 1176 u8 *data)
1184{ 1177{
1185 char *p = (char *)data; 1178 char *p = (char *)data;
1186 int i; 1179 int i;
@@ -1357,8 +1350,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1357 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); 1350 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1358 val = ixgbe_read_reg(&adapter->hw, reg); 1351 val = ixgbe_read_reg(&adapter->hw, reg);
1359 if (val != (test_pattern[pat] & write & mask)) { 1352 if (val != (test_pattern[pat] & write & mask)) {
1360 e_err(drv, "pattern test reg %04X failed: got " 1353 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1361 "0x%08X expected 0x%08X\n",
1362 reg, val, (test_pattern[pat] & write & mask)); 1354 reg, val, (test_pattern[pat] & write & mask));
1363 *data = reg; 1355 *data = reg;
1364 ixgbe_write_reg(&adapter->hw, reg, before); 1356 ixgbe_write_reg(&adapter->hw, reg, before);
@@ -1382,8 +1374,8 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1382 ixgbe_write_reg(&adapter->hw, reg, write & mask); 1374 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1383 val = ixgbe_read_reg(&adapter->hw, reg); 1375 val = ixgbe_read_reg(&adapter->hw, reg);
1384 if ((write & mask) != (val & mask)) { 1376 if ((write & mask) != (val & mask)) {
1385 e_err(drv, "set/check reg %04X test failed: got 0x%08X " 1377 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1386 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 1378 reg, (val & mask), (write & mask));
1387 *data = reg; 1379 *data = reg;
1388 ixgbe_write_reg(&adapter->hw, reg, before); 1380 ixgbe_write_reg(&adapter->hw, reg, before);
1389 return true; 1381 return true;
@@ -1430,8 +1422,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1430 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); 1422 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1431 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; 1423 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1432 if (value != after) { 1424 if (value != after) {
1433 e_err(drv, "failed STATUS register test got: 0x%08X " 1425 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1434 "expected: 0x%08X\n", after, value); 1426 after, value);
1435 *data = 1; 1427 *data = 1;
1436 return 1; 1428 return 1;
1437 } 1429 }
@@ -1533,10 +1525,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1533 return -1; 1525 return -1;
1534 } 1526 }
1535 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1527 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1536 netdev->name, netdev)) { 1528 netdev->name, netdev)) {
1537 shared_int = false; 1529 shared_int = false;
1538 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1530 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1539 netdev->name, netdev)) { 1531 netdev->name, netdev)) {
1540 *data = 1; 1532 *data = 1;
1541 return -1; 1533 return -1;
1542 } 1534 }
@@ -1563,9 +1555,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1563 */ 1555 */
1564 adapter->test_icr = 0; 1556 adapter->test_icr = 0;
1565 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1566 ~mask & 0x00007FFF); 1558 ~mask & 0x00007FFF);
1567 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1559 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1568 ~mask & 0x00007FFF); 1560 ~mask & 0x00007FFF);
1569 IXGBE_WRITE_FLUSH(&adapter->hw); 1561 IXGBE_WRITE_FLUSH(&adapter->hw);
1570 usleep_range(10000, 20000); 1562 usleep_range(10000, 20000);
1571 1563
@@ -1587,7 +1579,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1587 IXGBE_WRITE_FLUSH(&adapter->hw); 1579 IXGBE_WRITE_FLUSH(&adapter->hw);
1588 usleep_range(10000, 20000); 1580 usleep_range(10000, 20000);
1589 1581
1590 if (!(adapter->test_icr &mask)) { 1582 if (!(adapter->test_icr & mask)) {
1591 *data = 4; 1583 *data = 4;
1592 break; 1584 break;
1593 } 1585 }
@@ -1602,9 +1594,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1602 */ 1594 */
1603 adapter->test_icr = 0; 1595 adapter->test_icr = 0;
1604 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1605 ~mask & 0x00007FFF); 1597 ~mask & 0x00007FFF);
1606 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1607 ~mask & 0x00007FFF); 1599 ~mask & 0x00007FFF);
1608 IXGBE_WRITE_FLUSH(&adapter->hw); 1600 IXGBE_WRITE_FLUSH(&adapter->hw);
1609 usleep_range(10000, 20000); 1601 usleep_range(10000, 20000);
1610 1602
@@ -1964,7 +1956,7 @@ out:
1964} 1956}
1965 1957
1966static void ixgbe_diag_test(struct net_device *netdev, 1958static void ixgbe_diag_test(struct net_device *netdev,
1967 struct ethtool_test *eth_test, u64 *data) 1959 struct ethtool_test *eth_test, u64 *data)
1968{ 1960{
1969 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1961 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1970 bool if_running = netif_running(netdev); 1962 bool if_running = netif_running(netdev);
@@ -1987,10 +1979,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
1987 int i; 1979 int i;
1988 for (i = 0; i < adapter->num_vfs; i++) { 1980 for (i = 0; i < adapter->num_vfs; i++) {
1989 if (adapter->vfinfo[i].clear_to_send) { 1981 if (adapter->vfinfo[i].clear_to_send) {
1990 netdev_warn(netdev, "%s", 1982 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
1991 "offline diagnostic is not "
1992 "supported when VFs are "
1993 "present\n");
1994 data[0] = 1; 1983 data[0] = 1;
1995 data[1] = 1; 1984 data[1] = 1;
1996 data[2] = 1; 1985 data[2] = 1;
@@ -2037,8 +2026,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
2037 * loopback diagnostic. */ 2026 * loopback diagnostic. */
2038 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 2027 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2039 IXGBE_FLAG_VMDQ_ENABLED)) { 2028 IXGBE_FLAG_VMDQ_ENABLED)) {
2040 e_info(hw, "Skip MAC loopback diagnostic in VT " 2029 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2041 "mode\n");
2042 data[3] = 0; 2030 data[3] = 0;
2043 goto skip_loopback; 2031 goto skip_loopback;
2044 } 2032 }
@@ -2078,7 +2066,7 @@ skip_ol_tests:
2078} 2066}
2079 2067
2080static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 2068static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2081 struct ethtool_wolinfo *wol) 2069 struct ethtool_wolinfo *wol)
2082{ 2070{
2083 struct ixgbe_hw *hw = &adapter->hw; 2071 struct ixgbe_hw *hw = &adapter->hw;
2084 int retval = 0; 2072 int retval = 0;
@@ -2094,12 +2082,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2094} 2082}
2095 2083
2096static void ixgbe_get_wol(struct net_device *netdev, 2084static void ixgbe_get_wol(struct net_device *netdev,
2097 struct ethtool_wolinfo *wol) 2085 struct ethtool_wolinfo *wol)
2098{ 2086{
2099 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2087 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2100 2088
2101 wol->supported = WAKE_UCAST | WAKE_MCAST | 2089 wol->supported = WAKE_UCAST | WAKE_MCAST |
2102 WAKE_BCAST | WAKE_MAGIC; 2090 WAKE_BCAST | WAKE_MAGIC;
2103 wol->wolopts = 0; 2091 wol->wolopts = 0;
2104 2092
2105 if (ixgbe_wol_exclusion(adapter, wol) || 2093 if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2181,7 +2169,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
2181} 2169}
2182 2170
2183static int ixgbe_get_coalesce(struct net_device *netdev, 2171static int ixgbe_get_coalesce(struct net_device *netdev,
2184 struct ethtool_coalesce *ec) 2172 struct ethtool_coalesce *ec)
2185{ 2173{
2186 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2174 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2187 2175
@@ -2222,8 +2210,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2222 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 2210 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2223 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2211 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2224 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2212 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2225 e_info(probe, "rx-usecs value high enough " 2213 e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2226 "to re-enable RSC\n");
2227 return true; 2214 return true;
2228 } 2215 }
2229 /* if interrupt rate is too high then disable RSC */ 2216 /* if interrupt rate is too high then disable RSC */
@@ -2236,7 +2223,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2236} 2223}
2237 2224
2238static int ixgbe_set_coalesce(struct net_device *netdev, 2225static int ixgbe_set_coalesce(struct net_device *netdev,
2239 struct ethtool_coalesce *ec) 2226 struct ethtool_coalesce *ec)
2240{ 2227{
2241 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2228 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2242 struct ixgbe_q_vector *q_vector; 2229 struct ixgbe_q_vector *q_vector;
@@ -2421,9 +2408,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2421 switch (cmd->flow_type) { 2408 switch (cmd->flow_type) {
2422 case TCP_V4_FLOW: 2409 case TCP_V4_FLOW:
2423 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2410 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2411 /* fallthrough */
2424 case UDP_V4_FLOW: 2412 case UDP_V4_FLOW:
2425 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2413 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2426 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2414 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2415 /* fallthrough */
2427 case SCTP_V4_FLOW: 2416 case SCTP_V4_FLOW:
2428 case AH_ESP_V4_FLOW: 2417 case AH_ESP_V4_FLOW:
2429 case AH_V4_FLOW: 2418 case AH_V4_FLOW:
@@ -2433,9 +2422,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2433 break; 2422 break;
2434 case TCP_V6_FLOW: 2423 case TCP_V6_FLOW:
2435 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2424 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2425 /* fallthrough */
2436 case UDP_V6_FLOW: 2426 case UDP_V6_FLOW:
2437 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2427 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2438 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2428 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2429 /* fallthrough */
2439 case SCTP_V6_FLOW: 2430 case SCTP_V6_FLOW:
2440 case AH_ESP_V6_FLOW: 2431 case AH_ESP_V6_FLOW:
2441 case AH_V6_FLOW: 2432 case AH_V6_FLOW:
@@ -2787,8 +2778,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2787 2778
2788 if ((flags2 & UDP_RSS_FLAGS) && 2779 if ((flags2 & UDP_RSS_FLAGS) &&
2789 !(adapter->flags2 & UDP_RSS_FLAGS)) 2780 !(adapter->flags2 & UDP_RSS_FLAGS))
2790 e_warn(drv, "enabling UDP RSS: fragmented packets" 2781 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2791 " may arrive out of order to the stack above\n");
2792 2782
2793 adapter->flags2 = flags2; 2783 adapter->flags2 = flags2;
2794 2784
@@ -3099,5 +3089,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
3099 3089
3100void ixgbe_set_ethtool_ops(struct net_device *netdev) 3090void ixgbe_set_ethtool_ops(struct net_device *netdev)
3101{ 3091{
3102 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); 3092 netdev->ethtool_ops = &ixgbe_ethtool_ops;
3103} 3093}
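The final hunk in this file is part of a tree-wide cleanup carried by this merge: the SET_ETHTOOL_OPS() wrapper macro is dropped in favour of assigning the const ops table directly. A minimal sketch of the resulting pattern, where my_ethtool_ops, my_get_drvinfo and my_setup are hypothetical names (netdev->ethtool_ops is the real struct net_device field being assigned above):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    /* Hypothetical callback, shown only to give the ops table an entry. */
    static void my_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *info)
    {
            strlcpy(info->driver, "my_drv", sizeof(info->driver));
    }

    static const struct ethtool_ops my_ethtool_ops = {
            .get_drvinfo    = my_get_drvinfo,
    };

    static void my_setup(struct net_device *netdev)
    {
            /* was: SET_ETHTOOL_OPS(netdev, &my_ethtool_ops); */
            netdev->ethtool_ops = &my_ethtool_ops;
    }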
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc786750d..0772b7730fce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
81 void *extra_ddp_buffer; 81 void *extra_ddp_buffer;
82 dma_addr_t extra_ddp_buffer_dma; 82 dma_addr_t extra_ddp_buffer_dma;
83 unsigned long mode; 83 unsigned long mode;
84#ifdef CONFIG_IXGBE_DCB
85 u8 up; 84 u8 up;
86#endif
87}; 85};
88 86
89#endif /* _IXGBE_FCOE_H */ 87#endif /* _IXGBE_FCOE_H */
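The #ifdef removal above is what lets the reworked ixgbe_lpbthresh() in ixgbe_main.c (further down in this diff) read adapter->fcoe.up unconditionally: the field now costs one byte in non-DCB builds instead of requiring a CONFIG_IXGBE_DCB guard at every user. An illustrative stand-in for the trade-off (ixgbe_fcoe_sketch is a made-up name; the real struct ixgbe_fcoe has more members):

    struct ixgbe_fcoe_sketch {
            unsigned long mode;
            u8 up;          /* FCoE user priority; now present in all configs */
    };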
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2067d392cc3d..2d9451e39686 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1113,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
1113 err = pci_enable_msi(adapter->pdev); 1113 err = pci_enable_msi(adapter->pdev);
1114 if (err) { 1114 if (err) {
1115 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, 1115 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
1116 "Unable to allocate MSI interrupt, " 1116 "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
1117 "falling back to legacy. Error: %d\n", err); 1117 err);
1118 return; 1118 return;
1119 } 1119 }
1120 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 1120 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c047c3ef8d71..f5aa3311ea28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
301 ixgbe_service_event_schedule(adapter); 301 ixgbe_service_event_schedule(adapter);
302} 302}
303 303
304void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) 304static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
305{ 305{
306 u32 value; 306 u32 value;
307 307
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
320 ixgbe_remove_adapter(hw); 320 ixgbe_remove_adapter(hw);
321} 321}
322 322
323/**
324 * ixgbe_read_reg - Read from device register
325 * @hw: hw specific details
326 * @reg: offset of register to read
327 *
328 * Returns : value read or IXGBE_FAILED_READ_REG if removed
329 *
330 * This function is used to read device registers. It checks for device
331 * removal by confirming any read that returns all ones by checking the
332 * status register value for all ones. This function avoids reading from
333 * the hardware if a removal was previously detected in which case it
334 * returns IXGBE_FAILED_READ_REG (all ones).
335 */
336u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
337{
338 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
339 u32 value;
340
341 if (ixgbe_removed(reg_addr))
342 return IXGBE_FAILED_READ_REG;
343 value = readl(reg_addr + reg);
344 if (unlikely(value == IXGBE_FAILED_READ_REG))
345 ixgbe_check_remove(hw, reg);
346 return value;
347}
348
323static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) 349static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
324{ 350{
325 u16 value; 351 u16 value;
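The new ixgbe_read_reg() above formalizes the surprise-removal check: a hot-unplugged PCIe function reads back all ones, so a value of IXGBE_FAILED_READ_REG triggers a one-time status-register recheck, and once removal is latched every later read short-circuits without touching the bus. A usage sketch for a hypothetical caller (IXGBE_LINKS is a real register offset; the early-return policy is the caller's choice, not something the helper mandates):

    u32 links = ixgbe_read_reg(hw, IXGBE_LINKS);
    if (links == IXGBE_FAILED_READ_REG)
            return; /* adapter is gone; the service task handles teardown */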
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3743} 3769}
3744 3770
3745/** 3771/**
3746 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3747 * @adapter: driver data
3748 */
3749static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3750{
3751 struct ixgbe_hw *hw = &adapter->hw;
3752 u32 vlnctrl;
3753
3754 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3755 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3756 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3757}
3758
3759/**
3760 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3761 * @adapter: driver data
3762 */
3763static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3764{
3765 struct ixgbe_hw *hw = &adapter->hw;
3766 u32 vlnctrl;
3767
3768 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3769 vlnctrl |= IXGBE_VLNCTRL_VFE;
3770 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3771 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3772}
3773
3774/**
3775 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping 3772 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3776 * @adapter: driver data 3773 * @adapter: driver data
3777 */ 3774 */
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3850} 3847}
3851 3848
3852/** 3849/**
3850 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
3851 * @netdev: network interface device structure
3852 *
3853 * Writes multicast address list to the MTA hash table.
3854 * Returns: -ENOMEM on failure
3855 * 0 on no addresses written
3856 * X on writing X addresses to MTA
3857 **/
3858static int ixgbe_write_mc_addr_list(struct net_device *netdev)
3859{
3860 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3861 struct ixgbe_hw *hw = &adapter->hw;
3862
3863 if (!netif_running(netdev))
3864 return 0;
3865
3866 if (hw->mac.ops.update_mc_addr_list)
3867 hw->mac.ops.update_mc_addr_list(hw, netdev);
3868 else
3869 return -ENOMEM;
3870
3871#ifdef CONFIG_PCI_IOV
3872 ixgbe_restore_vf_multicasts(adapter);
3873#endif
3874
3875 return netdev_mc_count(netdev);
3876}
3877
3878#ifdef CONFIG_PCI_IOV
3879void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
3880{
3881 struct ixgbe_hw *hw = &adapter->hw;
3882 int i;
3883 for (i = 0; i < hw->mac.num_rar_entries; i++) {
3884 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
3885 hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
3886 adapter->mac_table[i].queue,
3887 IXGBE_RAH_AV);
3888 else
3889 hw->mac.ops.clear_rar(hw, i);
3890
3891 adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
3892 }
3893}
3894#endif
3895
3896static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
3897{
3898 struct ixgbe_hw *hw = &adapter->hw;
3899 int i;
3900 for (i = 0; i < hw->mac.num_rar_entries; i++) {
3901 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
3902 if (adapter->mac_table[i].state &
3903 IXGBE_MAC_STATE_IN_USE)
3904 hw->mac.ops.set_rar(hw, i,
3905 adapter->mac_table[i].addr,
3906 adapter->mac_table[i].queue,
3907 IXGBE_RAH_AV);
3908 else
3909 hw->mac.ops.clear_rar(hw, i);
3910
3911 adapter->mac_table[i].state &=
3912 ~(IXGBE_MAC_STATE_MODIFIED);
3913 }
3914 }
3915}
3916
3917static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
3918{
3919 int i;
3920 struct ixgbe_hw *hw = &adapter->hw;
3921
3922 for (i = 0; i < hw->mac.num_rar_entries; i++) {
3923 adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
3924 adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
3925 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
3926 adapter->mac_table[i].queue = 0;
3927 }
3928 ixgbe_sync_mac_table(adapter);
3929}
3930
3931static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
3932{
3933 struct ixgbe_hw *hw = &adapter->hw;
3934 int i, count = 0;
3935
3936 for (i = 0; i < hw->mac.num_rar_entries; i++) {
3937 if (adapter->mac_table[i].state == 0)
3938 count++;
3939 }
3940 return count;
3941}
3942
3943/* this function destroys the first RAR entry */
3944static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
3945 u8 *addr)
3946{
3947 struct ixgbe_hw *hw = &adapter->hw;
3948
3949 memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
3950 adapter->mac_table[0].queue = VMDQ_P(0);
3951 adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
3952 IXGBE_MAC_STATE_IN_USE);
3953 hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
3954 adapter->mac_table[0].queue,
3955 IXGBE_RAH_AV);
3956}
3957
3958int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
3959{
3960 struct ixgbe_hw *hw = &adapter->hw;
3961 int i;
3962
3963 if (is_zero_ether_addr(addr))
3964 return -EINVAL;
3965
3966 for (i = 0; i < hw->mac.num_rar_entries; i++) {
3967 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
3968 continue;
3969 adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
3970 IXGBE_MAC_STATE_IN_USE);
3971 ether_addr_copy(adapter->mac_table[i].addr, addr);
3972 adapter->mac_table[i].queue = queue;
3973 ixgbe_sync_mac_table(adapter);
3974 return i;
3975 }
3976 return -ENOMEM;
3977}
3978
3979int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
3980{
3981 /* search table for addr, if found, set to 0 and sync */
3982 int i;
3983 struct ixgbe_hw *hw = &adapter->hw;
3984
3985 if (is_zero_ether_addr(addr))
3986 return -EINVAL;
3987
3988 for (i = 0; i < hw->mac.num_rar_entries; i++) {
3989 if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
3990 adapter->mac_table[i].queue == queue) {
3991 adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
3992 adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
3993 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
3994 adapter->mac_table[i].queue = 0;
3995 ixgbe_sync_mac_table(adapter);
3996 return 0;
3997 }
3998 }
3999 return -ENOMEM;
4000}
4001/**
3853 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table 4002 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3854 * @netdev: network interface device structure 4003 * @netdev: network interface device structure
3855 * 4004 *
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3858 * 0 on no addresses written 4007 * 0 on no addresses written
3859 * X on writing X addresses to the RAR table 4008 * X on writing X addresses to the RAR table
3860 **/ 4009 **/
3861static int ixgbe_write_uc_addr_list(struct net_device *netdev) 4010static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
3862{ 4011{
3863 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4012 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3864 struct ixgbe_hw *hw = &adapter->hw;
3865 unsigned int rar_entries = hw->mac.num_rar_entries - 1;
3866 int count = 0; 4013 int count = 0;
3867 4014
3868 /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
3869 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3870 rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3871
3872 /* return ENOMEM indicating insufficient memory for addresses */ 4015 /* return ENOMEM indicating insufficient memory for addresses */
3873 if (netdev_uc_count(netdev) > rar_entries) 4016 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
3874 return -ENOMEM; 4017 return -ENOMEM;
3875 4018
3876 if (!netdev_uc_empty(netdev)) { 4019 if (!netdev_uc_empty(netdev)) {
3877 struct netdev_hw_addr *ha; 4020 struct netdev_hw_addr *ha;
3878 /* return error if we do not support writing to RAR table */
3879 if (!hw->mac.ops.set_rar)
3880 return -ENOMEM;
3881
3882 netdev_for_each_uc_addr(ha, netdev) { 4021 netdev_for_each_uc_addr(ha, netdev) {
3883 if (!rar_entries) 4022 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
3884 break; 4023 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
3885 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3886 VMDQ_P(0), IXGBE_RAH_AV);
3887 count++; 4024 count++;
3888 } 4025 }
3889 } 4026 }
3890 /* write the addresses in reverse order to avoid write combining */
3891 for (; rar_entries > 0 ; rar_entries--)
3892 hw->mac.ops.clear_rar(hw, rar_entries);
3893
3894 return count; 4027 return count;
3895} 4028}
3896 4029
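The block of new functions above replaces direct RAR programming with a software-backed table: each entry carries IXGBE_MAC_STATE_{DEFAULT,MODIFIED,IN_USE} flags, and ixgbe_sync_mac_table() pushes only modified slots to hardware. Note also that ixgbe_write_uc_addr_list() now deletes before re-adding each address, which keeps the table consistent if an address moved between pools. A usage sketch, assuming mac and fctrl exist in the surrounding function:

    /* Add returns the RAR index (>= 0) or -ENOMEM when the table is full. */
    int idx = ixgbe_add_mac_filter(adapter, mac, VMDQ_P(0));
    if (idx < 0)
            fctrl |= IXGBE_FCTRL_UPE;       /* table full: unicast promisc */

    /* Later, when the address goes away: */
    ixgbe_del_mac_filter(adapter, mac, VMDQ_P(0));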
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3908 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4041 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3909 struct ixgbe_hw *hw = &adapter->hw; 4042 struct ixgbe_hw *hw = &adapter->hw;
3910 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; 4043 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4044 u32 vlnctrl;
3911 int count; 4045 int count;
3912 4046
3913 /* Check for Promiscuous and All Multicast modes */ 4047 /* Check for Promiscuous and All Multicast modes */
3914
3915 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4048 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4049 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3916 4050
3917 /* set all bits that we expect to always be set */ 4051 /* set all bits that we expect to always be set */
3918 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ 4052 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3922 4056
3923 /* clear the bits we are changing the status of */ 4057 /* clear the bits we are changing the status of */
3924 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4058 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3925 4059 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3926 if (netdev->flags & IFF_PROMISC) { 4060 if (netdev->flags & IFF_PROMISC) {
3927 hw->addr_ctrl.user_set_promisc = true; 4061 hw->addr_ctrl.user_set_promisc = true;
3928 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4062 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3929 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); 4063 vmolr |= IXGBE_VMOLR_MPE;
3930 /* Only disable hardware filter vlans in promiscuous mode 4064 /* Only disable hardware filter vlans in promiscuous mode
3931 * if SR-IOV and VMDQ are disabled - otherwise ensure 4065 * if SR-IOV and VMDQ are disabled - otherwise ensure
3932 * that hardware VLAN filters remain enabled. 4066 * that hardware VLAN filters remain enabled.
3933 */ 4067 */
3934 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | 4068 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3935 IXGBE_FLAG_SRIOV_ENABLED))) 4069 IXGBE_FLAG_SRIOV_ENABLED)))
3936 ixgbe_vlan_filter_disable(adapter); 4070 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3937 else
3938 ixgbe_vlan_filter_enable(adapter);
3939 } else { 4071 } else {
3940 if (netdev->flags & IFF_ALLMULTI) { 4072 if (netdev->flags & IFF_ALLMULTI) {
3941 fctrl |= IXGBE_FCTRL_MPE; 4073 fctrl |= IXGBE_FCTRL_MPE;
3942 vmolr |= IXGBE_VMOLR_MPE; 4074 vmolr |= IXGBE_VMOLR_MPE;
3943 } 4075 }
3944 ixgbe_vlan_filter_enable(adapter); 4076 vlnctrl |= IXGBE_VLNCTRL_VFE;
3945 hw->addr_ctrl.user_set_promisc = false; 4077 hw->addr_ctrl.user_set_promisc = false;
3946 } 4078 }
3947 4079
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3950 * sufficient space to store all the addresses then enable 4082 * sufficient space to store all the addresses then enable
3951 * unicast promiscuous mode 4083 * unicast promiscuous mode
3952 */ 4084 */
3953 count = ixgbe_write_uc_addr_list(netdev); 4085 count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
3954 if (count < 0) { 4086 if (count < 0) {
3955 fctrl |= IXGBE_FCTRL_UPE; 4087 fctrl |= IXGBE_FCTRL_UPE;
3956 vmolr |= IXGBE_VMOLR_ROPE; 4088 vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3960 * then we should just turn on promiscuous mode so 4092 * then we should just turn on promiscuous mode so
3961 * that we can at least receive multicast traffic 4093 * that we can at least receive multicast traffic
3962 */ 4094 */
3963 hw->mac.ops.update_mc_addr_list(hw, netdev); 4095 count = ixgbe_write_mc_addr_list(netdev);
3964 vmolr |= IXGBE_VMOLR_ROMPE; 4096 if (count < 0) {
3965 4097 fctrl |= IXGBE_FCTRL_MPE;
3966 if (adapter->num_vfs) 4098 vmolr |= IXGBE_VMOLR_MPE;
3967 ixgbe_restore_vf_multicasts(adapter); 4099 } else if (count) {
4100 vmolr |= IXGBE_VMOLR_ROMPE;
4101 }
3968 4102
3969 if (hw->mac.type != ixgbe_mac_82598EB) { 4103 if (hw->mac.type != ixgbe_mac_82598EB) {
3970 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & 4104 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3985 /* NOTE: VLAN filtering is disabled by setting PROMISC */ 4119 /* NOTE: VLAN filtering is disabled by setting PROMISC */
3986 } 4120 }
3987 4121
4122 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3988 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4123 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3989 4124
3990 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 4125 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
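Across the ixgbe_set_rx_mode() hunks above, the removed ixgbe_vlan_filter_{enable,disable}() helpers collapse into a single read-modify-write of VLNCTRL: read once at entry, clear VFE/CFIEN alongside the other mode bits, set them per branch, write once at the end. A sketch of the shape only (vlan_filtering_wanted is a placeholder; see the hunks for the exact promisc/SR-IOV logic):

    u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
    vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
    if (vlan_filtering_wanted)              /* placeholder condition */
            vlnctrl |= IXGBE_VLNCTRL_VFE;
    IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);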
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4101 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && 4236 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4102 (pb == ixgbe_fcoe_get_tc(adapter))) 4237 (pb == ixgbe_fcoe_get_tc(adapter)))
4103 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; 4238 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4104
4105#endif 4239#endif
4240
4106 /* Calculate delay value for device */ 4241 /* Calculate delay value for device */
4107 switch (hw->mac.type) { 4242 switch (hw->mac.type) {
4108 case ixgbe_mac_X540: 4243 case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4143 * @adapter: board private structure to calculate for 4278 * @adapter: board private structure to calculate for
4144 * @pb: packet buffer to calculate 4279 * @pb: packet buffer to calculate
4145 */ 4280 */
4146static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) 4281static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4147{ 4282{
4148 struct ixgbe_hw *hw = &adapter->hw; 4283 struct ixgbe_hw *hw = &adapter->hw;
4149 struct net_device *dev = adapter->netdev; 4284 struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
4153 /* Calculate max LAN frame size */ 4288 /* Calculate max LAN frame size */
4154 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; 4289 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4155 4290
4291#ifdef IXGBE_FCOE
4292 /* FCoE traffic class uses FCOE jumbo frames */
4293 if ((dev->features & NETIF_F_FCOE_MTU) &&
4294 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4295 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
4296 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4297#endif
4298
4156 /* Calculate delay value for device */ 4299 /* Calculate delay value for device */
4157 switch (hw->mac.type) { 4300 switch (hw->mac.type) {
4158 case ixgbe_mac_X540: 4301 case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4179 if (!num_tc) 4322 if (!num_tc)
4180 num_tc = 1; 4323 num_tc = 1;
4181 4324
4182 hw->fc.low_water = ixgbe_lpbthresh(adapter);
4183
4184 for (i = 0; i < num_tc; i++) { 4325 for (i = 0; i < num_tc; i++) {
4185 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); 4326 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4327 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
4186 4328
4187 /* Low water marks must not be larger than high water marks */ 4329 /* Low water marks must not be larger than high water marks */
4188 if (hw->fc.low_water > hw->fc.high_water[i]) 4330 if (hw->fc.low_water[i] > hw->fc.high_water[i])
4189 hw->fc.low_water = 0; 4331 hw->fc.low_water[i] = 0;
4190 } 4332 }
4333
4334 for (; i < MAX_TRAFFIC_CLASS; i++)
4335 hw->fc.high_water[i] = 0;
4191} 4336}
4192 4337
4193static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) 4338static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4249 vmolr |= IXGBE_VMOLR_ROMPE; 4394 vmolr |= IXGBE_VMOLR_ROMPE;
4250 hw->mac.ops.update_mc_addr_list(hw, dev); 4395 hw->mac.ops.update_mc_addr_list(hw, dev);
4251 } 4396 }
4252 ixgbe_write_uc_addr_list(adapter->netdev); 4397 ixgbe_write_uc_addr_list(adapter->netdev, pool);
4253 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 4398 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4254} 4399}
4255 4400
4256static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4257 u8 *addr, u16 pool)
4258{
4259 struct ixgbe_hw *hw = &adapter->hw;
4260 unsigned int entry;
4261
4262 entry = hw->mac.num_rar_entries - pool;
4263 hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
4264}
4265
4266static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) 4401static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4267{ 4402{
4268 struct ixgbe_adapter *adapter = vadapter->real_adapter; 4403 struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4521,6 +4656,8 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4521 case ixgbe_phy_qsfp_active_unknown: 4656 case ixgbe_phy_qsfp_active_unknown:
4522 case ixgbe_phy_qsfp_intel: 4657 case ixgbe_phy_qsfp_intel:
4523 case ixgbe_phy_qsfp_unknown: 4658 case ixgbe_phy_qsfp_unknown:
4659 /* ixgbe_phy_none is set when no SFP module is present */
4660 case ixgbe_phy_none:
4524 return true; 4661 return true;
4525 case ixgbe_phy_nl: 4662 case ixgbe_phy_nl:
4526 if (hw->mac.type == ixgbe_mac_82598EB) 4663 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4742,7 +4879,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
4742void ixgbe_reset(struct ixgbe_adapter *adapter) 4879void ixgbe_reset(struct ixgbe_adapter *adapter)
4743{ 4880{
4744 struct ixgbe_hw *hw = &adapter->hw; 4881 struct ixgbe_hw *hw = &adapter->hw;
4882 struct net_device *netdev = adapter->netdev;
4745 int err; 4883 int err;
4884 u8 old_addr[ETH_ALEN];
4746 4885
4747 if (ixgbe_removed(hw->hw_addr)) 4886 if (ixgbe_removed(hw->hw_addr))
4748 return; 4887 return;
@@ -4778,9 +4917,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4778 } 4917 }
4779 4918
4780 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 4919 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4781 4920 /* do not flush user set addresses */
4782 /* reprogram the RAR[0] in case user changed it. */ 4921 memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
4783 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); 4922 ixgbe_flush_sw_mac_table(adapter);
4923 ixgbe_mac_set_default_filter(adapter, old_addr);
4784 4924
4785 /* update SAN MAC vmdq pool selection */ 4925 /* update SAN MAC vmdq pool selection */
4786 if (hw->mac.san_mac_rar_index) 4926 if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5166,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5026#endif /* CONFIG_IXGBE_DCB */ 5166#endif /* CONFIG_IXGBE_DCB */
5027#endif /* IXGBE_FCOE */ 5167#endif /* IXGBE_FCOE */
5028 5168
5169 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5170 hw->mac.num_rar_entries,
5171 GFP_ATOMIC);
5172
5029 /* Set MAC specific capability flags and exceptions */ 5173 /* Set MAC specific capability flags and exceptions */
5030 switch (hw->mac.type) { 5174 switch (hw->mac.type) {
5031 case ixgbe_mac_82598EB: 5175 case ixgbe_mac_82598EB:
@@ -5517,6 +5661,17 @@ err_setup_tx:
5517 return err; 5661 return err;
5518} 5662}
5519 5663
5664static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
5665{
5666 ixgbe_ptp_suspend(adapter);
5667
5668 ixgbe_down(adapter);
5669 ixgbe_free_irq(adapter);
5670
5671 ixgbe_free_all_tx_resources(adapter);
5672 ixgbe_free_all_rx_resources(adapter);
5673}
5674
5520/** 5675/**
5521 * ixgbe_close - Disables a network interface 5676 * ixgbe_close - Disables a network interface
5522 * @netdev: network interface device structure 5677 * @netdev: network interface device structure
@@ -5534,14 +5689,10 @@ static int ixgbe_close(struct net_device *netdev)
5534 5689
5535 ixgbe_ptp_stop(adapter); 5690 ixgbe_ptp_stop(adapter);
5536 5691
5537 ixgbe_down(adapter); 5692 ixgbe_close_suspend(adapter);
5538 ixgbe_free_irq(adapter);
5539 5693
5540 ixgbe_fdir_filter_exit(adapter); 5694 ixgbe_fdir_filter_exit(adapter);
5541 5695
5542 ixgbe_free_all_tx_resources(adapter);
5543 ixgbe_free_all_rx_resources(adapter);
5544
5545 ixgbe_release_hw_control(adapter); 5696 ixgbe_release_hw_control(adapter);
5546 5697
5547 return 0; 5698 return 0;
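The two hunks above are the payoff of the new ixgbe_close_suspend() helper: ixgbe_close() and __ixgbe_shutdown() previously open-coded the same down/free-IRQ/free-rings sequence and had already drifted (the suspend path left PTP timestamping work running). Factoring the sequence keeps both paths identical. Sketch of the resulting call sites:

    /* ixgbe_close(): full teardown, the PTP clock device is destroyed. */
    ixgbe_ptp_stop(adapter);
    ixgbe_close_suspend(adapter);

    /* __ixgbe_shutdown(): the device may come back, so PTP is only
     * suspended (inside the helper) and the clock device is kept.
     */
    if (netif_running(netdev))
            ixgbe_close_suspend(adapter);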
@@ -5608,12 +5759,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5608 netif_device_detach(netdev); 5759 netif_device_detach(netdev);
5609 5760
5610 rtnl_lock(); 5761 rtnl_lock();
5611 if (netif_running(netdev)) { 5762 if (netif_running(netdev))
5612 ixgbe_down(adapter); 5763 ixgbe_close_suspend(adapter);
5613 ixgbe_free_irq(adapter);
5614 ixgbe_free_all_tx_resources(adapter);
5615 ixgbe_free_all_rx_resources(adapter);
5616 }
5617 rtnl_unlock(); 5764 rtnl_unlock();
5618 5765
5619 ixgbe_clear_interrupt_scheme(adapter); 5766 ixgbe_clear_interrupt_scheme(adapter);
@@ -5945,7 +6092,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5945 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 6092 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5946 for (i = 0; i < adapter->num_tx_queues; i++) 6093 for (i = 0; i < adapter->num_tx_queues; i++)
5947 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 6094 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5948 &(adapter->tx_ring[i]->state)); 6095 &(adapter->tx_ring[i]->state));
5949 /* re-enable flow director interrupts */ 6096 /* re-enable flow director interrupts */
5950 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 6097 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5951 } else { 6098 } else {
@@ -7172,16 +7319,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
7172 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7319 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7173 struct ixgbe_hw *hw = &adapter->hw; 7320 struct ixgbe_hw *hw = &adapter->hw;
7174 struct sockaddr *addr = p; 7321 struct sockaddr *addr = p;
7322 int ret;
7175 7323
7176 if (!is_valid_ether_addr(addr->sa_data)) 7324 if (!is_valid_ether_addr(addr->sa_data))
7177 return -EADDRNOTAVAIL; 7325 return -EADDRNOTAVAIL;
7178 7326
7327 ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7179 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 7328 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7180 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 7329 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7181 7330
7182 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); 7331 ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7183 7332 return ret > 0 ? 0 : ret;
7184 return 0;
7185} 7333}
7186 7334
7187static int 7335static int
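Ordering matters in the ixgbe_set_mac() hunk above: the delete uses hw->mac.addr and therefore has to run before memcpy() overwrites it with the new address; afterwards the positive RAR index returned by ixgbe_add_mac_filter() is folded to the 0 that ndo_set_mac_address must return. Condensed:

    ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));  /* old address */
    memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);   /* new address */
    ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
    return ret > 0 ? 0 : ret;       /* RAR index on success -> 0 */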
@@ -7783,7 +7931,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7783 .ndo_do_ioctl = ixgbe_ioctl, 7931 .ndo_do_ioctl = ixgbe_ioctl,
7784 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, 7932 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
7785 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, 7933 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
7786 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, 7934 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
7787 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, 7935 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
7788 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 7936 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
7789 .ndo_get_stats64 = ixgbe_get_stats64, 7937 .ndo_get_stats64 = ixgbe_get_stats64,
@@ -8187,6 +8335,8 @@ skip_sriov:
8187 goto err_sw_init; 8335 goto err_sw_init;
8188 } 8336 }
8189 8337
8338 ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
8339
8190 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8340 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8191 (unsigned long) adapter); 8341 (unsigned long) adapter);
8192 8342
@@ -8242,7 +8392,7 @@ skip_sriov:
8242 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 8392 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
8243 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", 8393 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
8244 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 8394 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
8245 part_str); 8395 part_str);
8246 else 8396 else
8247 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", 8397 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
8248 hw->mac.type, hw->phy.type, part_str); 8398 hw->mac.type, hw->phy.type, part_str);
@@ -8304,8 +8454,8 @@ skip_sriov:
8304 8454
8305 ixgbe_dbg_adapter_init(adapter); 8455 ixgbe_dbg_adapter_init(adapter);
8306 8456
8307 /* Need link setup for MNG FW, else wait for IXGBE_UP */ 8457 /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
8308 if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link) 8458 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
8309 hw->mac.ops.setup_link(hw, 8459 hw->mac.ops.setup_link(hw,
8310 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, 8460 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
8311 true); 8461 true);
@@ -8319,6 +8469,7 @@ err_sw_init:
8319 ixgbe_disable_sriov(adapter); 8469 ixgbe_disable_sriov(adapter);
8320 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; 8470 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
8321 iounmap(adapter->io_addr); 8471 iounmap(adapter->io_addr);
8472 kfree(adapter->mac_table);
8322err_ioremap: 8473err_ioremap:
8323 free_netdev(netdev); 8474 free_netdev(netdev);
8324err_alloc_etherdev: 8475err_alloc_etherdev:
@@ -8392,6 +8543,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
8392 8543
8393 e_dev_info("complete\n"); 8544 e_dev_info("complete\n");
8394 8545
8546 kfree(adapter->mac_table);
8395 free_netdev(netdev); 8547 free_netdev(netdev);
8396 8548
8397 pci_disable_pcie_error_reporting(pdev); 8549 pci_disable_pcie_error_reporting(pdev);
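For reference across the ixgbe_main.c hunks above: the mac_table buffer introduced in ixgbe_sw_init() is freed on both exits, the err_sw_init unwind path and ixgbe_remove(). The allocation hunk as shown does not test for kzalloc() failure, so a defensive sketch (the NULL check is an addition here, not in the patch):

    adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
                                 hw->mac.num_rar_entries, GFP_ATOMIC);
    if (!adapter->mac_table)
            return -ENOMEM;         /* not checked in the hunk above */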
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index f5c6af2b891b..1918e0abf734 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -223,7 +223,7 @@ out:
223 * received an ack to that message within delay * timeout period 223 * received an ack to that message within delay * timeout period
224 **/ 224 **/
225static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, 225static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
226 u16 mbx_id) 226 u16 mbx_id)
227{ 227{
228 struct ixgbe_mbx_info *mbx = &hw->mbx; 228 struct ixgbe_mbx_info *mbx = &hw->mbx;
229 s32 ret_val = IXGBE_ERR_MBX; 229 s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
269 u32 vf_bit = vf_number % 16; 269 u32 vf_bit = vf_number % 16;
270 270
271 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, 271 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
272 index)) { 272 index)) {
273 ret_val = 0; 273 ret_val = 0;
274 hw->mbx.stats.reqs++; 274 hw->mbx.stats.reqs++;
275 } 275 }
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
291 u32 vf_bit = vf_number % 16; 291 u32 vf_bit = vf_number % 16;
292 292
293 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, 293 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
294 index)) { 294 index)) {
295 ret_val = 0; 295 ret_val = 0;
296 hw->mbx.stats.acks++; 296 hw->mbx.stats.acks++;
297 } 297 }
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
366 * returns SUCCESS if it successfully copied message into the buffer 366 * returns SUCCESS if it successfully copied message into the buffer
367 **/ 367 **/
368static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 368static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
369 u16 vf_number) 369 u16 vf_number)
370{ 370{
371 s32 ret_val; 371 s32 ret_val;
372 u16 i; 372 u16 i;
@@ -407,7 +407,7 @@ out_no_write:
407 * a message due to a VF request so no polling for message is needed. 407 * a message due to a VF request so no polling for message is needed.
408 **/ 408 **/
409static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, 409static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
410 u16 vf_number) 410 u16 vf_number)
411{ 411{
412 s32 ret_val; 412 s32 ret_val;
413 u16 i; 413 u16 i;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a9b9ad69ed0e..a5cb755de3a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -54,11 +54,11 @@
54 * Message ACK's are the value or'd with 0xF0000000 54 * Message ACK's are the value or'd with 0xF0000000
55 */ 55 */
56#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 56#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
57 * this are the ACK */ 57 * this are the ACK */
58#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 58#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
59 * this are the NACK */ 59 * this are the NACK */
60#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 60#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
61 clear to send requests */ 61 clear to send requests */
62#define IXGBE_VT_MSGINFO_SHIFT 16 62#define IXGBE_VT_MSGINFO_SHIFT 16
 63/* bits 23:16 are used for extra info for certain messages */ 63/* bits 23:16 are used for extra info for certain messages */
64#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) 64#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index a76af8e28a04..ff68b7a9deff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -67,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
67 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { 67 if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
68 ixgbe_get_phy_id(hw); 68 ixgbe_get_phy_id(hw);
69 hw->phy.type = 69 hw->phy.type =
70 ixgbe_get_phy_type_from_id(hw->phy.id); 70 ixgbe_get_phy_type_from_id(hw->phy.id);
71 71
72 if (hw->phy.type == ixgbe_phy_unknown) { 72 if (hw->phy.type == ixgbe_phy_unknown) {
73 hw->phy.ops.read_reg(hw, 73 hw->phy.ops.read_reg(hw,
@@ -136,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
136 u16 phy_id_low = 0; 136 u16 phy_id_low = 0;
137 137
138 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, 138 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
139 &phy_id_high); 139 &phy_id_high);
140 140
141 if (status == 0) { 141 if (status == 0) {
142 hw->phy.id = (u32)(phy_id_high << 16); 142 hw->phy.id = (u32)(phy_id_high << 16);
143 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, 143 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
144 &phy_id_low); 144 &phy_id_low);
145 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 145 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
146 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 146 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
147 } 147 }
@@ -318,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
318 * @phy_data: Pointer to read data from PHY register 318 * @phy_data: Pointer to read data from PHY register
319 **/ 319 **/
320s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 320s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
321 u32 device_type, u16 *phy_data) 321 u32 device_type, u16 *phy_data)
322{ 322{
323 s32 status; 323 s32 status;
324 u16 gssr; 324 u16 gssr;
@@ -421,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
421 * @phy_data: Data to write to the PHY register 421 * @phy_data: Data to write to the PHY register
422 **/ 422 **/
423s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 423s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
424 u32 device_type, u16 phy_data) 424 u32 device_type, u16 phy_data)
425{ 425{
426 s32 status; 426 s32 status;
427 u16 gssr; 427 u16 gssr;
@@ -548,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
548 * @speed: new link speed 548 * @speed: new link speed
549 **/ 549 **/
550s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 550s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
551 ixgbe_link_speed speed, 551 ixgbe_link_speed speed,
552 bool autoneg_wait_to_complete) 552 bool autoneg_wait_to_complete)
553{ 553{
554 554
555 /* 555 /*
@@ -582,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
582 * Determines the link capabilities by reading the AUTOC register. 582 * Determines the link capabilities by reading the AUTOC register.
583 */ 583 */
584s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 584s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
585 ixgbe_link_speed *speed, 585 ixgbe_link_speed *speed,
586 bool *autoneg) 586 bool *autoneg)
587{ 587{
588 s32 status = IXGBE_ERR_LINK_SETUP; 588 s32 status = IXGBE_ERR_LINK_SETUP;
589 u16 speed_ability; 589 u16 speed_ability;
@@ -592,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
592 *autoneg = true; 592 *autoneg = true;
593 593
594 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, 594 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
595 &speed_ability); 595 &speed_ability);
596 596
597 if (status == 0) { 597 if (status == 0) {
598 if (speed_ability & MDIO_SPEED_10G) 598 if (speed_ability & MDIO_SPEED_10G)
@@ -806,11 +806,11 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
806 806
807 /* reset the PHY and poll for completion */ 807 /* reset the PHY and poll for completion */
808 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 808 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
809 (phy_data | MDIO_CTRL1_RESET)); 809 (phy_data | MDIO_CTRL1_RESET));
810 810
811 for (i = 0; i < 100; i++) { 811 for (i = 0; i < 100; i++) {
812 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 812 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
813 &phy_data); 813 &phy_data);
814 if ((phy_data & MDIO_CTRL1_RESET) == 0) 814 if ((phy_data & MDIO_CTRL1_RESET) == 0)
815 break; 815 break;
816 usleep_range(10000, 20000); 816 usleep_range(10000, 20000);
@@ -824,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
824 824
825 /* Get init offsets */ 825 /* Get init offsets */
826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
827 &data_offset); 827 &data_offset);
828 if (ret_val != 0) 828 if (ret_val != 0)
829 goto out; 829 goto out;
830 830
@@ -838,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
838 if (ret_val) 838 if (ret_val)
839 goto err_eeprom; 839 goto err_eeprom;
840 control = (eword & IXGBE_CONTROL_MASK_NL) >> 840 control = (eword & IXGBE_CONTROL_MASK_NL) >>
841 IXGBE_CONTROL_SHIFT_NL; 841 IXGBE_CONTROL_SHIFT_NL;
842 edata = eword & IXGBE_DATA_MASK_NL; 842 edata = eword & IXGBE_DATA_MASK_NL;
843 switch (control) { 843 switch (control) {
844 case IXGBE_DELAY_NL: 844 case IXGBE_DELAY_NL:
@@ -859,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
859 if (ret_val) 859 if (ret_val)
860 goto err_eeprom; 860 goto err_eeprom;
861 hw->phy.ops.write_reg(hw, phy_offset, 861 hw->phy.ops.write_reg(hw, phy_offset,
862 MDIO_MMD_PMAPMD, eword); 862 MDIO_MMD_PMAPMD, eword);
863 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, 863 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
864 phy_offset); 864 phy_offset);
865 data_offset++; 865 data_offset++;
@@ -1010,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { 1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
1011 if (hw->bus.lan_id == 0) 1011 if (hw->bus.lan_id == 0)
1012 hw->phy.sfp_type = 1012 hw->phy.sfp_type =
1013 ixgbe_sfp_type_da_cu_core0; 1013 ixgbe_sfp_type_da_cu_core0;
1014 else 1014 else
1015 hw->phy.sfp_type = 1015 hw->phy.sfp_type =
1016 ixgbe_sfp_type_da_cu_core1; 1016 ixgbe_sfp_type_da_cu_core1;
1017 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { 1017 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1018 hw->phy.ops.read_i2c_eeprom( 1018 hw->phy.ops.read_i2c_eeprom(
1019 hw, IXGBE_SFF_CABLE_SPEC_COMP, 1019 hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1035,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1035 IXGBE_SFF_10GBASELR_CAPABLE)) { 1035 IXGBE_SFF_10GBASELR_CAPABLE)) {
1036 if (hw->bus.lan_id == 0) 1036 if (hw->bus.lan_id == 0)
1037 hw->phy.sfp_type = 1037 hw->phy.sfp_type =
1038 ixgbe_sfp_type_srlr_core0; 1038 ixgbe_sfp_type_srlr_core0;
1039 else 1039 else
1040 hw->phy.sfp_type = 1040 hw->phy.sfp_type =
1041 ixgbe_sfp_type_srlr_core1; 1041 ixgbe_sfp_type_srlr_core1;
1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { 1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1043 if (hw->bus.lan_id == 0) 1043 if (hw->bus.lan_id == 0)
1044 hw->phy.sfp_type = 1044 hw->phy.sfp_type =
@@ -1087,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1087 goto err_read_i2c_eeprom; 1087 goto err_read_i2c_eeprom;
1088 1088
1089 status = hw->phy.ops.read_i2c_eeprom(hw, 1089 status = hw->phy.ops.read_i2c_eeprom(hw,
1090 IXGBE_SFF_VENDOR_OUI_BYTE1, 1090 IXGBE_SFF_VENDOR_OUI_BYTE1,
1091 &oui_bytes[1]); 1091 &oui_bytes[1]);
1092 1092
1093 if (status != 0) 1093 if (status != 0)
1094 goto err_read_i2c_eeprom; 1094 goto err_read_i2c_eeprom;
1095 1095
1096 status = hw->phy.ops.read_i2c_eeprom(hw, 1096 status = hw->phy.ops.read_i2c_eeprom(hw,
1097 IXGBE_SFF_VENDOR_OUI_BYTE2, 1097 IXGBE_SFF_VENDOR_OUI_BYTE2,
1098 &oui_bytes[2]); 1098 &oui_bytes[2]);
1099 1099
1100 if (status != 0) 1100 if (status != 0)
1101 goto err_read_i2c_eeprom; 1101 goto err_read_i2c_eeprom;
@@ -1403,8 +1403,8 @@ err_read_i2c_eeprom:
1403 * so it returns the offsets to the phy init sequence block. 1403 * so it returns the offsets to the phy init sequence block.
1404 **/ 1404 **/
1405s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 1405s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1406 u16 *list_offset, 1406 u16 *list_offset,
1407 u16 *data_offset) 1407 u16 *data_offset)
1408{ 1408{
1409 u16 sfp_id; 1409 u16 sfp_id;
1410 u16 sfp_type = hw->phy.sfp_type; 1410 u16 sfp_type = hw->phy.sfp_type;
@@ -1493,11 +1493,11 @@ err_phy:
1493 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1493 * Performs byte read operation to SFP module's EEPROM over I2C interface.
1494 **/ 1494 **/
1495s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1495s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1496 u8 *eeprom_data) 1496 u8 *eeprom_data)
1497{ 1497{
1498 return hw->phy.ops.read_i2c_byte(hw, byte_offset, 1498 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1499 IXGBE_I2C_EEPROM_DEV_ADDR, 1499 IXGBE_I2C_EEPROM_DEV_ADDR,
1500 eeprom_data); 1500 eeprom_data);
1501} 1501}
1502 1502
1503/** 1503/**
@@ -1525,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1525 * Performs byte write operation to SFP module's EEPROM over I2C interface. 1525 * Performs byte write operation to SFP module's EEPROM over I2C interface.
1526 **/ 1526 **/
1527s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 1527s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1528 u8 eeprom_data) 1528 u8 eeprom_data)
1529{ 1529{
1530 return hw->phy.ops.write_i2c_byte(hw, byte_offset, 1530 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1531 IXGBE_I2C_EEPROM_DEV_ADDR, 1531 IXGBE_I2C_EEPROM_DEV_ADDR,
1532 eeprom_data); 1532 eeprom_data);
1533} 1533}
1534 1534
1535/** 1535/**
@@ -1542,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1542 * a specified device address. 1542 * a specified device address.
1543 **/ 1543 **/
1544s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1544s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1545 u8 dev_addr, u8 *data) 1545 u8 dev_addr, u8 *data)
1546{ 1546{
1547 s32 status = 0; 1547 s32 status = 0;
1548 u32 max_retry = 10; 1548 u32 max_retry = 10;
@@ -1631,7 +1631,7 @@ read_byte_out:
1631 * a specified device address. 1631 * a specified device address.
1632 **/ 1632 **/
1633s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1633s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1634 u8 dev_addr, u8 data) 1634 u8 dev_addr, u8 data)
1635{ 1635{
1636 s32 status = 0; 1636 s32 status = 0;
1637 u32 max_retry = 1; 1637 u32 max_retry = 1;
@@ -2046,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2046 2046
2047 /* Check that the LASI temp alarm status was triggered */ 2047 /* Check that the LASI temp alarm status was triggered */
2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, 2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2049 MDIO_MMD_PMAPMD, &phy_data); 2049 MDIO_MMD_PMAPMD, &phy_data);
2050 2050
2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) 2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2052 goto out; 2052 goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 0bb047f751c2..54071ed17e3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -114,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
114s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 114s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
115s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); 115s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
116s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 116s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
117 u32 device_type, u16 *phy_data); 117 u32 device_type, u16 *phy_data);
118s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 118s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
119 u32 device_type, u16 phy_data); 119 u32 device_type, u16 phy_data);
120s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 120s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
121 u32 device_type, u16 *phy_data); 121 u32 device_type, u16 *phy_data);
122s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, 122s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
123 u32 device_type, u16 phy_data); 123 u32 device_type, u16 phy_data);
124s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); 124s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
125s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 125s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
126 ixgbe_link_speed speed, 126 ixgbe_link_speed speed,
127 bool autoneg_wait_to_complete); 127 bool autoneg_wait_to_complete);
128s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, 128s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
129 ixgbe_link_speed *speed, 129 ixgbe_link_speed *speed,
130 bool *autoneg); 130 bool *autoneg);
131bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); 131bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
132 132
133/* PHY specific */ 133/* PHY specific */
134s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 134s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
135 ixgbe_link_speed *speed, 135 ixgbe_link_speed *speed,
136 bool *link_up); 136 bool *link_up);
137s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); 137s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
138s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 138s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
139 u16 *firmware_version); 139 u16 *firmware_version);
140s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, 140s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
141 u16 *firmware_version); 141 u16 *firmware_version);
142 142
143s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 143s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
144s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); 144s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
145s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 145s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
146s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 146s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
147 u16 *list_offset, 147 u16 *list_offset,
148 u16 *data_offset); 148 u16 *data_offset);
149s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); 149s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
150s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 150s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
151 u8 dev_addr, u8 *data); 151 u8 dev_addr, u8 *data);
152s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 152s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
153 u8 dev_addr, u8 data); 153 u8 dev_addr, u8 data);
154s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 154s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
155 u8 *eeprom_data); 155 u8 *eeprom_data);
156s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, 156s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
157 u8 *sff8472_data); 157 u8 *sff8472_data);
158s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 158s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
159 u8 eeprom_data); 159 u8 eeprom_data);
160#endif /* _IXGBE_PHY_H_ */ 160#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 8902ae683457..68f87ecb8a76 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,7 +26,6 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28#include "ixgbe.h" 28#include "ixgbe.h"
29#include <linux/export.h>
30#include <linux/ptp_classify.h> 29#include <linux/ptp_classify.h>
31 30
32/* 31/*
@@ -334,7 +333,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
334} 333}
335 334
336/** 335/**
337 * ixgbe_ptp_enable 336 * ixgbe_ptp_feature_enable
338 * @ptp: the ptp clock structure 337 * @ptp: the ptp clock structure
339 * @rq: the requested feature to change 338 * @rq: the requested feature to change
340 * @on: whether to enable or disable the feature 339 * @on: whether to enable or disable the feature
@@ -342,8 +341,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
342 * enable (or disable) ancillary features of the phc subsystem. 341 * enable (or disable) ancillary features of the phc subsystem.
343 * our driver only supports the PPS feature on the X540 342 * our driver only supports the PPS feature on the X540
344 */ 343 */
345static int ixgbe_ptp_enable(struct ptp_clock_info *ptp, 344static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
346 struct ptp_clock_request *rq, int on) 345 struct ptp_clock_request *rq, int on)
347{ 346{
348 struct ixgbe_adapter *adapter = 347 struct ixgbe_adapter *adapter =
349 container_of(ptp, struct ixgbe_adapter, ptp_caps); 348 container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -570,9 +569,9 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
570} 569}
571 570
572/** 571/**
573 * ixgbe_ptp_set_ts_config - control hardware time stamping 572 * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
574 * @adapter: pointer to adapter struct 573 * @adapter: the private ixgbe adapter structure
575 * @ifreq: ioctl data 574 * @config: the hwtstamp configuration requested
576 * 575 *
577 * Outgoing time stamping can be enabled and disabled. Play nice and 576 * Outgoing time stamping can be enabled and disabled. Play nice and
578 * disable it when requested, although it shouldn't cause any overhead 577 * disable it when requested, although it shouldn't cause any overhead
@@ -590,25 +589,25 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
590 * packets, regardless of the type specified in the register, only use V2 589 * packets, regardless of the type specified in the register, only use V2
591 * Event mode. This more accurately tells the user what the hardware is going 590 * Event mode. This more accurately tells the user what the hardware is going
592 * to do anyways. 591 * to do anyways.
592 *
593 * Note: this may modify the hwtstamp configuration towards a more general
594 * mode, if required to support the specifically requested mode.
593 */ 595 */
594int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) 596static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
597 struct hwtstamp_config *config)
595{ 598{
596 struct ixgbe_hw *hw = &adapter->hw; 599 struct ixgbe_hw *hw = &adapter->hw;
597 struct hwtstamp_config config;
598 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; 600 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
599 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; 601 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
600 u32 tsync_rx_mtrl = PTP_EV_PORT << 16; 602 u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
601 bool is_l2 = false; 603 bool is_l2 = false;
602 u32 regval; 604 u32 regval;
603 605
604 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
605 return -EFAULT;
606
607 /* reserved for future extensions */ 606 /* reserved for future extensions */
608 if (config.flags) 607 if (config->flags)
609 return -EINVAL; 608 return -EINVAL;
610 609
611 switch (config.tx_type) { 610 switch (config->tx_type) {
612 case HWTSTAMP_TX_OFF: 611 case HWTSTAMP_TX_OFF:
613 tsync_tx_ctl = 0; 612 tsync_tx_ctl = 0;
614 case HWTSTAMP_TX_ON: 613 case HWTSTAMP_TX_ON:
@@ -617,7 +616,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
617 return -ERANGE; 616 return -ERANGE;
618 } 617 }
619 618
620 switch (config.rx_filter) { 619 switch (config->rx_filter) {
621 case HWTSTAMP_FILTER_NONE: 620 case HWTSTAMP_FILTER_NONE:
622 tsync_rx_ctl = 0; 621 tsync_rx_ctl = 0;
623 tsync_rx_mtrl = 0; 622 tsync_rx_mtrl = 0;
@@ -641,7 +640,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
641 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 640 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
642 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; 641 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
643 is_l2 = true; 642 is_l2 = true;
644 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 643 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
645 break; 644 break;
646 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 645 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
647 case HWTSTAMP_FILTER_ALL: 646 case HWTSTAMP_FILTER_ALL:
@@ -652,7 +651,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
652 * Delay_Req messages and hardware does not support 651 * Delay_Req messages and hardware does not support
653 * timestamping all packets => return error 652 * timestamping all packets => return error
654 */ 653 */
655 config.rx_filter = HWTSTAMP_FILTER_NONE; 654 config->rx_filter = HWTSTAMP_FILTER_NONE;
656 return -ERANGE; 655 return -ERANGE;
657 } 656 }
658 657
@@ -671,7 +670,6 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
671 else 670 else
672 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 671 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
673 672
674
675 /* enable/disable TX */ 673 /* enable/disable TX */
676 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 674 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
677 regval &= ~IXGBE_TSYNCTXCTL_ENABLED; 675 regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -693,6 +691,29 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
693 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); 691 regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
694 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); 692 regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
695 693
694 return 0;
695}
696
697/**
698 * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
699 * @adapter: pointer to adapter struct
700 * @ifreq: ioctl data
701 *
702 * Set hardware to requested mode. If unsupported, return an error with no
703 * changes. Otherwise, store the mode for future reference.
704 */
705int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
706{
707 struct hwtstamp_config config;
708 int err;
709
710 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
711 return -EFAULT;
712
713 err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
714 if (err)
715 return err;
716
696 /* save these settings for future reference */ 717 /* save these settings for future reference */
697 memcpy(&adapter->tstamp_config, &config, 718 memcpy(&adapter->tstamp_config, &config,
698 sizeof(adapter->tstamp_config)); 719 sizeof(adapter->tstamp_config));
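The hunk above completes the split: ixgbe_ptp_set_ts_config() now only copies the request in from userspace and delegates the register programming to ixgbe_ptp_set_timestamp_mode(), so the reset path can replay a saved mode without an ifreq. A minimal userspace sketch of the SIOCSHWTSTAMP ioctl this entry point serves; the interface name and requested filter are illustrative:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}
	/* the driver may widen the filter (e.g. to HWTSTAMP_FILTER_PTP_V2_EVENT);
	 * the granted mode is copied back into cfg */
	printf("granted tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}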
@@ -790,9 +811,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
790 * ixgbe_ptp_reset 811 * ixgbe_ptp_reset
791 * @adapter: the ixgbe private board structure 812 * @adapter: the ixgbe private board structure
792 * 813 *
793 * When the MAC resets, all timesync features are reset. This function should be 814 * When the MAC resets, all the hardware bits for timesync are reset. This
794 * called to re-enable the PTP clock structure. It will re-init the timecounter 815 * function is used to re-enable the device for PTP based on current settings.
795 * structure based on the kernel time as well as setup the cycle counter data. 816 * We do lose the current clock time, so just reset the cyclecounter to the
817 * system real clock time.
818 *
 819 * This function maintains the hwtstamp_config settings, and resets the SDP
820 * output if it was enabled.
796 */ 821 */
797void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) 822void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
798{ 823{
@@ -804,8 +829,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
804 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); 829 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
805 IXGBE_WRITE_FLUSH(hw); 830 IXGBE_WRITE_FLUSH(hw);
806 831
807 /* Reset the saved tstamp_config */ 832 /* reset the hardware timestamping mode */
808 memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config)); 833 ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
809 834
810 ixgbe_ptp_start_cyclecounter(adapter); 835 ixgbe_ptp_start_cyclecounter(adapter);
811 836
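With the mode-setting helper factored out, ixgbe_ptp_reset() no longer zeroes the saved tstamp_config but re-applies it, so user-requested timestamping survives a MAC reset. A toy model of that replay pattern, with the register writes stubbed out:

#include <stdio.h>

struct ts_config { int tx_on; int rx_filter; };

static struct ts_config saved;	/* analogous to adapter->tstamp_config */

/* program the (stubbed) hardware from a config; the driver writes
 * TSYNCTXCTL/TSYNCRXCTL and the 1588 ethertype filter here */
static int apply_timestamp_mode(const struct ts_config *c)
{
	printf("hw: tx=%d rx_filter=%d\n", c->tx_on, c->rx_filter);
	return 0;
}

static void mac_reset(void)
{
	/* the reset wiped the timesync registers; replay the saved mode
	 * instead of forgetting what the user requested */
	apply_timestamp_mode(&saved);
}

int main(void)
{
	saved.tx_on = 1;
	saved.rx_filter = 1;		/* placeholder filter value */
	apply_timestamp_mode(&saved);	/* user ioctl sets the mode */
	mac_reset();			/* settings survive the reset */
	return 0;
}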
@@ -825,16 +850,23 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
825} 850}
826 851
827/** 852/**
828 * ixgbe_ptp_init 853 * ixgbe_ptp_create_clock
829 * @adapter: the ixgbe private adapter structure 854 * @adapter: the ixgbe private adapter structure
830 * 855 *
831 * This function performs the required steps for enabling ptp 856 * This function performs setup of the user entry point function table and
832 * support. If ptp support has already been loaded it simply calls the 857 * initializes the PTP clock device, which is used to access the clock-like
833 * cyclecounter init routine and exits. 858 * features of the PTP core. It will be called by ixgbe_ptp_init, only if
859 * there isn't already a clock device (such as after a suspend/resume cycle,
860 * where the clock device wasn't destroyed).
834 */ 861 */
835void ixgbe_ptp_init(struct ixgbe_adapter *adapter) 862static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
836{ 863{
837 struct net_device *netdev = adapter->netdev; 864 struct net_device *netdev = adapter->netdev;
865 long err;
866
867 /* do nothing if we already have a clock device */
868 if (!IS_ERR_OR_NULL(adapter->ptp_clock))
869 return 0;
838 870
839 switch (adapter->hw.mac.type) { 871 switch (adapter->hw.mac.type) {
840 case ixgbe_mac_X540: 872 case ixgbe_mac_X540:
@@ -851,7 +883,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
851 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; 883 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
852 adapter->ptp_caps.gettime = ixgbe_ptp_gettime; 884 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
853 adapter->ptp_caps.settime = ixgbe_ptp_settime; 885 adapter->ptp_caps.settime = ixgbe_ptp_settime;
854 adapter->ptp_caps.enable = ixgbe_ptp_enable; 886 adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
855 break; 887 break;
856 case ixgbe_mac_82599EB: 888 case ixgbe_mac_82599EB:
857 snprintf(adapter->ptp_caps.name, 889 snprintf(adapter->ptp_caps.name,
@@ -867,24 +899,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
867 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; 899 adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
868 adapter->ptp_caps.gettime = ixgbe_ptp_gettime; 900 adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
869 adapter->ptp_caps.settime = ixgbe_ptp_settime; 901 adapter->ptp_caps.settime = ixgbe_ptp_settime;
870 adapter->ptp_caps.enable = ixgbe_ptp_enable; 902 adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
871 break; 903 break;
872 default: 904 default:
873 adapter->ptp_clock = NULL; 905 adapter->ptp_clock = NULL;
874 return; 906 return -EOPNOTSUPP;
875 } 907 }
876 908
877 spin_lock_init(&adapter->tmreg_lock);
878 INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
879
880 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 909 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
881 &adapter->pdev->dev); 910 &adapter->pdev->dev);
882 if (IS_ERR(adapter->ptp_clock)) { 911 if (IS_ERR(adapter->ptp_clock)) {
912 err = PTR_ERR(adapter->ptp_clock);
883 adapter->ptp_clock = NULL; 913 adapter->ptp_clock = NULL;
884 e_dev_err("ptp_clock_register failed\n"); 914 e_dev_err("ptp_clock_register failed\n");
915 return err;
885 } else 916 } else
886 e_dev_info("registered PHC device on %s\n", netdev->name); 917 e_dev_info("registered PHC device on %s\n", netdev->name);
887 918
919 /* set default timestamp mode to disabled here. We do this in
920 * create_clock instead of init, because we don't want to override the
921 * previous settings during a resume cycle.
922 */
923 adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
924 adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
925
926 return 0;
927}
928
929/**
930 * ixgbe_ptp_init
931 * @adapter: the ixgbe private adapter structure
932 *
933 * This function performs the required steps for enabling PTP
934 * support. If PTP support has already been loaded it simply calls the
935 * cyclecounter init routine and exits.
936 */
937void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
938{
939 /* initialize the spin lock first since we can't control when a user
940 * will call the entry functions once we have initialized the clock
941 * device
942 */
943 spin_lock_init(&adapter->tmreg_lock);
944
945 /* obtain a PTP device, or re-use an existing device */
946 if (ixgbe_ptp_create_clock(adapter))
947 return;
948
949 /* we have a clock so we can initialize work now */
950 INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
951
952 /* reset the PTP related hardware bits */
888 ixgbe_ptp_reset(adapter); 953 ixgbe_ptp_reset(adapter);
889 954
890 /* enter the IXGBE_PTP_RUNNING state */ 955 /* enter the IXGBE_PTP_RUNNING state */
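ixgbe_ptp_create_clock() is deliberately idempotent: the IS_ERR_OR_NULL() guard makes it safe to call from every ixgbe_ptp_init(), so a suspend/resume cycle reuses the registered PHC instead of churning /dev/ptpN numbers. A stripped-down illustration of the guard, with plain pointers standing in for the kernel's ERR_PTR convention:

#include <stdio.h>

struct clk { int id; };

static struct clk the_clock = { 42 };	/* stand-in for a registered PHC */
static struct clk *ptp_clock;		/* persists across suspend/resume */

/* register a clock only if none exists yet, so the init path can call
 * this unconditionally; the driver also treats ERR_PTR values as absent */
static int create_clock(void)
{
	if (ptp_clock)
		return 0;
	ptp_clock = &the_clock;
	printf("clock registered\n");
	return 0;
}

int main(void)
{
	create_clock();		/* first init: registers */
	create_clock();		/* after resume: reuses the existing device */
	return 0;
}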
@@ -894,28 +959,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
894} 959}
895 960
896/** 961/**
897 * ixgbe_ptp_stop - disable ptp device and stop the overflow check 962 * ixgbe_ptp_suspend - stop PTP work items
 898 * @adapter: pointer to adapter struct 963 * @adapter: pointer to adapter struct
899 * 964 *
900 * this function stops the ptp support, and cancels the delayed work. 965 * this function suspends PTP activity, and prevents more PTP work from being
966 * generated, but does not destroy the PTP clock device.
901 */ 967 */
902void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) 968void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
903{ 969{
904 /* Leave the IXGBE_PTP_RUNNING state. */ 970 /* Leave the IXGBE_PTP_RUNNING state. */
905 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 971 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
906 return; 972 return;
907 973
908 /* stop the PPS signal */ 974 /* since this might be called in suspend, we don't clear the state,
909 adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; 975 * but simply reset the auxiliary PPS signal control register
910 ixgbe_ptp_setup_sdp(adapter); 976 */
977 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
911 978
979 /* ensure that we cancel any pending PTP Tx work item in progress */
912 cancel_work_sync(&adapter->ptp_tx_work); 980 cancel_work_sync(&adapter->ptp_tx_work);
913 if (adapter->ptp_tx_skb) { 981 if (adapter->ptp_tx_skb) {
914 dev_kfree_skb_any(adapter->ptp_tx_skb); 982 dev_kfree_skb_any(adapter->ptp_tx_skb);
915 adapter->ptp_tx_skb = NULL; 983 adapter->ptp_tx_skb = NULL;
916 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); 984 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
917 } 985 }
986}
987
988/**
989 * ixgbe_ptp_stop - close the PTP device
990 * @adapter: pointer to adapter struct
991 *
 992 * Completely destroy the PTP device; this should only be called when the
 993 * device is being fully closed.
994 */
995void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
996{
997 /* first, suspend PTP activity */
998 ixgbe_ptp_suspend(adapter);
918 999
1000 /* disable the PTP clock device */
919 if (adapter->ptp_clock) { 1001 if (adapter->ptp_clock) {
920 ptp_clock_unregister(adapter->ptp_clock); 1002 ptp_clock_unregister(adapter->ptp_clock);
921 adapter->ptp_clock = NULL; 1003 adapter->ptp_clock = NULL;
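The teardown is now layered: ixgbe_ptp_suspend() quiesces the work items and the PPS output while leaving the clock registered, and ixgbe_ptp_stop() calls it before unregistering. A small model of that two-level teardown:

#include <stdio.h>
#include <stdbool.h>

static bool running = true;
static bool have_clock = true;

static void ptp_suspend_model(void)
{
	if (!running)
		return;
	running = false;
	/* cancel pending Tx-timestamp work and silence the PPS output,
	 * but keep the clock device so /dev/ptpN survives */
	printf("suspended (clock kept: %d)\n", have_clock);
}

static void ptp_stop_model(void)
{
	ptp_suspend_model();	/* quiesce first, then tear down */
	if (have_clock) {
		have_clock = false;
		printf("clock unregistered\n");
	}
}

int main(void)
{
	ptp_suspend_model();	/* system suspend: device persists */
	running = true;		/* resumed */
	ptp_stop_model();	/* interface closed: full teardown */
	return 0;
}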
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d396c99..16b3a1cd9db6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
72 for (i = 0; i < num_vf_macvlans; i++) { 72 for (i = 0; i < num_vf_macvlans; i++) {
73 mv_list->vf = -1; 73 mv_list->vf = -1;
74 mv_list->free = true; 74 mv_list->free = true;
75 mv_list->rar_entry = hw->mac.num_rar_entries -
76 (i + adapter->num_vfs + 1);
77 list_add(&mv_list->l, &adapter->vf_mvs.l); 75 list_add(&mv_list->l, &adapter->vf_mvs.l);
78 mv_list++; 76 mv_list++;
79 } 77 }
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
327 u32 vector_bit; 325 u32 vector_bit;
328 u32 vector_reg; 326 u32 vector_reg;
329 u32 mta_reg; 327 u32 mta_reg;
328 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
330 329
331 /* only so many hash values supported */ 330 /* only so many hash values supported */
332 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); 331 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
353 mta_reg |= (1 << vector_bit); 352 mta_reg |= (1 << vector_bit);
354 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 353 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
355 } 354 }
355 vmolr |= IXGBE_VMOLR_ROMPE;
356 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
356 357
357 return 0; 358 return 0;
358} 359}
359 360
360static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter) 361#ifdef CONFIG_PCI_IOV
361{
362 struct ixgbe_hw *hw = &adapter->hw;
363 struct list_head *pos;
364 struct vf_macvlans *entry;
365
366 list_for_each(pos, &adapter->vf_mvs.l) {
367 entry = list_entry(pos, struct vf_macvlans, l);
368 if (!entry->free)
369 hw->mac.ops.set_rar(hw, entry->rar_entry,
370 entry->vf_macvlan,
371 entry->vf, IXGBE_RAH_AV);
372 }
373}
374
375void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) 362void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
376{ 363{
377 struct ixgbe_hw *hw = &adapter->hw; 364 struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
382 u32 mta_reg; 369 u32 mta_reg;
383 370
384 for (i = 0; i < adapter->num_vfs; i++) { 371 for (i = 0; i < adapter->num_vfs; i++) {
372 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
385 vfinfo = &adapter->vfinfo[i]; 373 vfinfo = &adapter->vfinfo[i];
386 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { 374 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
387 hw->addr_ctrl.mta_in_use++; 375 hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
391 mta_reg |= (1 << vector_bit); 379 mta_reg |= (1 << vector_bit);
392 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 380 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
393 } 381 }
382
383 if (vfinfo->num_vf_mc_hashes)
384 vmolr |= IXGBE_VMOLR_ROMPE;
385 else
386 vmolr &= ~IXGBE_VMOLR_ROMPE;
387 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
394 } 388 }
395 389
396 /* Restore any VF macvlans */ 390 /* Restore any VF macvlans */
397 ixgbe_restore_vf_macvlans(adapter); 391 ixgbe_full_sync_mac_table(adapter);
398} 392}
393#endif
399 394
400static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, 395static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
401 u32 vf) 396 u32 vf)
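Instead of leaving ROMPE permanently set in ixgbe_set_vmolr(), the VMOLR bit is now toggled per VF according to whether the VF actually programmed multicast hashes. A sketch of the per-VF update; the bit position here is a placeholder, the real mask is IXGBE_VMOLR_ROMPE from ixgbe_type.h:

#include <stdio.h>

#define VMOLR_ROMPE (1u << 25)	/* placeholder; real mask: IXGBE_VMOLR_ROMPE */

/* set ROMPE only for VFs that registered multicast hashes and clear it
 * for the rest, mirroring the restore loop above */
static unsigned int update_vmolr(unsigned int vmolr, int num_mc_hashes)
{
	if (num_mc_hashes)
		vmolr |= VMOLR_ROMPE;
	else
		vmolr &= ~VMOLR_ROMPE;
	return vmolr;
}

int main(void)
{
	printf("0x%08x\n", update_vmolr(0x0, 3));		/* bit set */
	printf("0x%08x\n", update_vmolr(VMOLR_ROMPE, 0));	/* bit cleared */
	return 0;
}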
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
495static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 490static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
496{ 491{
497 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 492 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
498 vmolr |= (IXGBE_VMOLR_ROMPE | 493 vmolr |= IXGBE_VMOLR_BAM;
499 IXGBE_VMOLR_BAM);
500 if (aupe) 494 if (aupe)
501 vmolr |= IXGBE_VMOLR_AUPE; 495 vmolr |= IXGBE_VMOLR_AUPE;
502 else 496 else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
514{ 508{
515 struct ixgbe_hw *hw = &adapter->hw; 509 struct ixgbe_hw *hw = &adapter->hw;
516 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 510 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
517 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
518 u8 num_tcs = netdev_get_num_tc(adapter->netdev); 511 u8 num_tcs = netdev_get_num_tc(adapter->netdev);
519 512
520 /* add PF assigned VLAN or VLAN 0 */ 513 /* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
544 /* Flush and reset the mta with the new values */ 537 /* Flush and reset the mta with the new values */
545 ixgbe_set_rx_mode(adapter->netdev); 538 ixgbe_set_rx_mode(adapter->netdev);
546 539
547 hw->mac.ops.clear_rar(hw, rar_entry); 540 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
548 541
549 /* reset VF api back to unknown */ 542 /* reset VF api back to unknown */
550 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; 543 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
553static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 546static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
554 int vf, unsigned char *mac_addr) 547 int vf, unsigned char *mac_addr)
555{ 548{
556 struct ixgbe_hw *hw = &adapter->hw; 549 ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
557 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
558
559 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 550 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
560 hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); 551 ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
561 552
562 return 0; 553 return 0;
563} 554}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
565static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, 556static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
566 int vf, int index, unsigned char *mac_addr) 557 int vf, int index, unsigned char *mac_addr)
567{ 558{
568 struct ixgbe_hw *hw = &adapter->hw;
569 struct list_head *pos; 559 struct list_head *pos;
570 struct vf_macvlans *entry; 560 struct vf_macvlans *entry;
571 561
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
576 entry->vf = -1; 566 entry->vf = -1;
577 entry->free = true; 567 entry->free = true;
578 entry->is_macvlan = false; 568 entry->is_macvlan = false;
579 hw->mac.ops.clear_rar(hw, entry->rar_entry); 569 ixgbe_del_mac_filter(adapter,
570 entry->vf_macvlan, vf);
580 } 571 }
581 } 572 }
582 } 573 }
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
612 entry->vf = vf; 603 entry->vf = vf;
613 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); 604 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
614 605
615 hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV); 606 ixgbe_add_mac_filter(adapter, mac_addr, vf);
616 607
617 return 0; 608 return 0;
618} 609}
@@ -1138,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1138 adapter->vfinfo[vf].vlan_count--; 1129 adapter->vfinfo[vf].vlan_count--;
1139 adapter->vfinfo[vf].pf_vlan = 0; 1130 adapter->vfinfo[vf].pf_vlan = 0;
1140 adapter->vfinfo[vf].pf_qos = 0; 1131 adapter->vfinfo[vf].pf_qos = 0;
1141 } 1132 }
1142out: 1133out:
1143 return err; 1134 return err;
1144} 1135}
1145 1136
1146static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) 1137static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1231,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1231 } 1222 }
1232} 1223}
1233 1224
1234int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 1225int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1226 int max_tx_rate)
1235{ 1227{
1236 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1228 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1237 int link_speed; 1229 int link_speed;
@@ -1249,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
1249 if (link_speed != 10000) 1241 if (link_speed != 10000)
1250 return -EINVAL; 1242 return -EINVAL;
1251 1243
1244 if (min_tx_rate)
1245 return -EINVAL;
1246
1252 /* rate limit cannot be less than 10Mbs or greater than link speed */ 1247 /* rate limit cannot be less than 10Mbs or greater than link speed */
1253 if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed))) 1248 if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
1254 return -EINVAL; 1249 return -EINVAL;
1255 1250
1256 /* store values */ 1251 /* store values */
1257 adapter->vf_rate_link_speed = link_speed; 1252 adapter->vf_rate_link_speed = link_speed;
1258 adapter->vfinfo[vf].tx_rate = tx_rate; 1253 adapter->vfinfo[vf].tx_rate = max_tx_rate;
1259 1254
1260 /* update hardware configuration */ 1255 /* update hardware configuration */
1261 ixgbe_set_vf_rate_limit(adapter, vf); 1256 ixgbe_set_vf_rate_limit(adapter, vf);
@@ -1297,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1297 return -EINVAL; 1292 return -EINVAL;
1298 ivi->vf = vf; 1293 ivi->vf = vf;
1299 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); 1294 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1300 ivi->tx_rate = adapter->vfinfo[vf].tx_rate; 1295 ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1296 ivi->min_tx_rate = 0;
1301 ivi->vlan = adapter->vfinfo[vf].pf_vlan; 1297 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1302 ivi->qos = adapter->vfinfo[vf].pf_qos; 1298 ivi->qos = adapter->vfinfo[vf].pf_qos;
1303 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; 1299 ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
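The .ndo_set_vf_bw change tracks the core's move to a min/max rate pair; ixgbe can only cap a VF, so any nonzero minimum is rejected and the existing bounds check applies to the maximum. The validation logic, runnable standalone (10000 stands for a 10G link, rates in Mb/s):

#include <stdio.h>
#include <errno.h>

/* mirrors the new contract: the hardware cannot guarantee a rate floor,
 * so only max_tx_rate may be nonzero */
static int set_vf_bw(int link_speed, int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate)
		return -EINVAL;

	/* a nonzero cap must exceed 10 Mb/s and not exceed link speed */
	if (max_tx_rate && (max_tx_rate <= 10 || max_tx_rate > link_speed))
		return -EINVAL;

	return 0;
}

int main(void)
{
	printf("%d\n", set_vf_bw(10000, 0, 5000));	/* 0: accepted */
	printf("%d\n", set_vf_bw(10000, 100, 5000));	/* -22: min unsupported */
	printf("%d\n", set_vf_bw(10000, 0, 20000));	/* -22: above link speed */
	return 0;
}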
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eaddfb2ed..32c26d586c01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
34 */ 34 */
35#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) 35#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
36 36
37#ifdef CONFIG_PCI_IOV
37void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 38void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
39#endif
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 40void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); 41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
40void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -42,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
42int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); 44int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
43int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, 45int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
44 u8 qos); 46 u8 qos);
45int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 47int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
48 int max_tx_rate);
46int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); 49int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
47int ixgbe_ndo_get_vf_config(struct net_device *netdev, 50int ixgbe_ndo_get_vf_config(struct net_device *netdev,
48 int vf, struct ifla_vf_info *ivi); 51 int vf, struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff2423f07..9a89f98b35f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -160,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
160#define IXGBE_MAX_EITR 0x00000FF8 160#define IXGBE_MAX_EITR 0x00000FF8
161#define IXGBE_MIN_EITR 8 161#define IXGBE_MIN_EITR 8
162#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ 162#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
163 (0x012300 + (((_i) - 24) * 4))) 163 (0x012300 + (((_i) - 24) * 4)))
164#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 164#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
165#define IXGBE_EITR_LLI_MOD 0x00008000 165#define IXGBE_EITR_LLI_MOD 0x00008000
166#define IXGBE_EITR_CNT_WDIS 0x80000000 166#define IXGBE_EITR_CNT_WDIS 0x80000000
@@ -213,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
213 * 64-127: 0x0D014 + (n-64)*0x40 213 * 64-127: 0x0D014 + (n-64)*0x40
214 */ 214 */
215#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ 215#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
216 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 216 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
217 (0x0D014 + (((_i) - 64) * 0x40)))) 217 (0x0D014 + (((_i) - 64) * 0x40))))
218/* 218/*
219 * Rx DCA Control Register: 219 * Rx DCA Control Register:
@@ -222,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
222 * 64-127: 0x0D00C + (n-64)*0x40 222 * 64-127: 0x0D00C + (n-64)*0x40
223 */ 223 */
224#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ 224#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
225 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ 225 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
226 (0x0D00C + (((_i) - 64) * 0x40)))) 226 (0x0D00C + (((_i) - 64) * 0x40))))
227#define IXGBE_RDRXCTL 0x02F00 227#define IXGBE_RDRXCTL 0x02F00
228#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 228#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
229 /* 8 of these 0x03C00 - 0x03C1C */ 229 /* 8 of these 0x03C00 - 0x03C1C */
230#define IXGBE_RXCTRL 0x03000 230#define IXGBE_RXCTRL 0x03000
231#define IXGBE_DROPEN 0x03D04 231#define IXGBE_DROPEN 0x03D04
232#define IXGBE_RXPBSIZE_SHIFT 10 232#define IXGBE_RXPBSIZE_SHIFT 10
@@ -239,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
239/* Multicast Table Array - 128 entries */ 239/* Multicast Table Array - 128 entries */
240#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) 240#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
241#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 241#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
242 (0x0A200 + ((_i) * 8))) 242 (0x0A200 + ((_i) * 8)))
243#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 243#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
244 (0x0A204 + ((_i) * 8))) 244 (0x0A204 + ((_i) * 8)))
245#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) 245#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
246#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) 246#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
247/* Packet split receive type */ 247/* Packet split receive type */
248#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ 248#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
249 (0x0EA00 + ((_i) * 4))) 249 (0x0EA00 + ((_i) * 4)))
250/* array of 4096 1-bit vlan filters */ 250/* array of 4096 1-bit vlan filters */
251#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) 251#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
252/*array of 4096 4-bit vlan vmdq indices */ 252/*array of 4096 4-bit vlan vmdq indices */
@@ -696,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
696 696
697#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) 697#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
698#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \ 698#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
699 (0x08600 + ((_i) * 4))) 699 (0x08600 + ((_i) * 4)))
700#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) 700#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
701 701
702#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ 702#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -820,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
820#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 820#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
821#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 821#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
822#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ 822#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
823 IXGBE_GCR_EXT_VT_MODE_64) 823 IXGBE_GCR_EXT_VT_MODE_64)
824 824
825/* Time Sync Registers */ 825/* Time Sync Registers */
826#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 826#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1396,10 +1396,10 @@ enum {
1396#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 1396#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
1397 1397
1398#define IXGBE_EIMS_ENABLE_MASK ( \ 1398#define IXGBE_EIMS_ENABLE_MASK ( \
1399 IXGBE_EIMS_RTX_QUEUE | \ 1399 IXGBE_EIMS_RTX_QUEUE | \
1400 IXGBE_EIMS_LSC | \ 1400 IXGBE_EIMS_LSC | \
1401 IXGBE_EIMS_TCP_TIMER | \ 1401 IXGBE_EIMS_TCP_TIMER | \
1402 IXGBE_EIMS_OTHER) 1402 IXGBE_EIMS_OTHER)
1403 1403
1404/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 1404/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
1405#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ 1405#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
@@ -2161,18 +2161,18 @@ enum {
2161 2161
2162/* Masks to determine if packets should be dropped due to frame errors */ 2162/* Masks to determine if packets should be dropped due to frame errors */
2163#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ 2163#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
2164 IXGBE_RXD_ERR_CE | \ 2164 IXGBE_RXD_ERR_CE | \
2165 IXGBE_RXD_ERR_LE | \ 2165 IXGBE_RXD_ERR_LE | \
2166 IXGBE_RXD_ERR_PE | \ 2166 IXGBE_RXD_ERR_PE | \
2167 IXGBE_RXD_ERR_OSE | \ 2167 IXGBE_RXD_ERR_OSE | \
2168 IXGBE_RXD_ERR_USE) 2168 IXGBE_RXD_ERR_USE)
2169 2169
2170#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ 2170#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
2171 IXGBE_RXDADV_ERR_CE | \ 2171 IXGBE_RXDADV_ERR_CE | \
2172 IXGBE_RXDADV_ERR_LE | \ 2172 IXGBE_RXDADV_ERR_LE | \
2173 IXGBE_RXDADV_ERR_PE | \ 2173 IXGBE_RXDADV_ERR_PE | \
2174 IXGBE_RXDADV_ERR_OSE | \ 2174 IXGBE_RXDADV_ERR_OSE | \
2175 IXGBE_RXDADV_ERR_USE) 2175 IXGBE_RXDADV_ERR_USE)
2176 2176
2177/* Multicast bit mask */ 2177/* Multicast bit mask */
2178#define IXGBE_MCSTCTRL_MFE 0x4 2178#define IXGBE_MCSTCTRL_MFE 0x4
@@ -2393,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
2393#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ 2393#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
2394#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 2394#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
2395#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 2395#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
2396 IXGBE_ADVTXD_POPTS_SHIFT) 2396 IXGBE_ADVTXD_POPTS_SHIFT)
2397#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 2397#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
2398 IXGBE_ADVTXD_POPTS_SHIFT) 2398 IXGBE_ADVTXD_POPTS_SHIFT)
2399#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 2399#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
2400#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 2400#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
2401#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 2401#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2435,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
2435#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 2435#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
2436#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 2436#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
2437#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ 2437#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
2438 IXGBE_LINK_SPEED_10GB_FULL) 2438 IXGBE_LINK_SPEED_10GB_FULL)
2439#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ 2439#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
2440 IXGBE_LINK_SPEED_1GB_FULL | \ 2440 IXGBE_LINK_SPEED_1GB_FULL | \
2441 IXGBE_LINK_SPEED_10GB_FULL) 2441 IXGBE_LINK_SPEED_10GB_FULL)
2442 2442
2443 2443
2444/* Physical layer type */ 2444/* Physical layer type */
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
2746/* Flow control parameters */ 2746/* Flow control parameters */
2747struct ixgbe_fc_info { 2747struct ixgbe_fc_info {
2748 u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ 2748 u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
2749 u32 low_water; /* Flow Control Low-water */ 2749 u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
2750 u16 pause_time; /* Flow Control Pause timer */ 2750 u16 pause_time; /* Flow Control Pause timer */
2751 bool send_xon; /* Flow control send XON */ 2751 bool send_xon; /* Flow control send XON */
2752 bool strict_ieee; /* Strict IEEE mode */ 2752 bool strict_ieee; /* Strict IEEE mode */
@@ -2840,7 +2840,7 @@ struct ixgbe_hw;
2840 2840
2841/* iterator type for walking multicast address lists */ 2841/* iterator type for walking multicast address lists */
2842typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, 2842typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
2843 u32 *vmdq); 2843 u32 *vmdq);
2844 2844
2845/* Function pointer table */ 2845/* Function pointer table */
2846struct ixgbe_eeprom_operations { 2846struct ixgbe_eeprom_operations {
@@ -2887,7 +2887,7 @@ struct ixgbe_mac_operations {
2887 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2887 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2888 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2888 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
2889 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2889 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
2890 bool *); 2890 bool *);
2891 2891
2892 /* Packet Buffer Manipulation */ 2892 /* Packet Buffer Manipulation */
2893 void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); 2893 void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 188a5974b85c..40dd798e1290 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -81,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
81 bool autoneg_wait_to_complete) 81 bool autoneg_wait_to_complete)
82{ 82{
83 return hw->phy.ops.setup_link_speed(hw, speed, 83 return hw->phy.ops.setup_link_speed(hw, speed,
84 autoneg_wait_to_complete); 84 autoneg_wait_to_complete);
85} 85}
86 86
87/** 87/**
@@ -155,7 +155,7 @@ mac_reset_top:
155 /* Add the SAN MAC address to the RAR only if it's a valid address */ 155 /* Add the SAN MAC address to the RAR only if it's a valid address */
156 if (is_valid_ether_addr(hw->mac.san_addr)) { 156 if (is_valid_ether_addr(hw->mac.san_addr)) {
157 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 157 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
158 hw->mac.san_addr, 0, IXGBE_RAH_AV); 158 hw->mac.san_addr, 0, IXGBE_RAH_AV);
159 159
160 /* Save the SAN MAC RAR index */ 160 /* Save the SAN MAC RAR index */
161 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; 161 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -166,7 +166,7 @@ mac_reset_top:
166 166
167 /* Store the alternative WWNN/WWPN prefix */ 167 /* Store the alternative WWNN/WWPN prefix */
168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
169 &hw->mac.wwpn_prefix); 169 &hw->mac.wwpn_prefix);
170 170
171reset_hw_out: 171reset_hw_out:
172 return status; 172 return status;
@@ -237,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
237 237
238 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 238 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
239 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 239 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
240 IXGBE_EEC_SIZE_SHIFT); 240 IXGBE_EEC_SIZE_SHIFT);
241 eeprom->word_size = 1 << (eeprom_size + 241 eeprom->word_size = 1 << (eeprom_size +
242 IXGBE_EEPROM_WORD_SIZE_SHIFT); 242 IXGBE_EEPROM_WORD_SIZE_SHIFT);
243 243
244 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", 244 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
245 eeprom->type, eeprom->word_size); 245 eeprom->type, eeprom->word_size);
@@ -712,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
712 udelay(50); 712 udelay(50);
713 } 713 }
714 } else { 714 } else {
715 hw_dbg(hw, "Software semaphore SMBI between device drivers " 715 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
716 "not granted.\n");
717 } 716 }
718 717
719 return status; 718 return status;
@@ -813,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
813 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 812 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
814 .get_media_type = &ixgbe_get_media_type_X540, 813 .get_media_type = &ixgbe_get_media_type_X540,
815 .get_supported_physical_layer = 814 .get_supported_physical_layer =
816 &ixgbe_get_supported_physical_layer_X540, 815 &ixgbe_get_supported_physical_layer_X540,
817 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, 816 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
818 .get_mac_addr = &ixgbe_get_mac_addr_generic, 817 .get_mac_addr = &ixgbe_get_mac_addr_generic,
819 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 818 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1baecb60f065..d420f124633f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -135,8 +135,8 @@ static int ixgbevf_get_settings(struct net_device *netdev,
135 ethtool_cmd_speed_set(ecmd, speed); 135 ethtool_cmd_speed_set(ecmd, speed);
136 ecmd->duplex = DUPLEX_FULL; 136 ecmd->duplex = DUPLEX_FULL;
137 } else { 137 } else {
138 ethtool_cmd_speed_set(ecmd, -1); 138 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
139 ecmd->duplex = -1; 139 ecmd->duplex = DUPLEX_UNKNOWN;
140 } 140 }
141 141
142 return 0; 142 return 0;
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
813 813
814void ixgbevf_set_ethtool_ops(struct net_device *netdev) 814void ixgbevf_set_ethtool_ops(struct net_device *netdev)
815{ 815{
816 SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops); 816 netdev->ethtool_ops = &ixgbevf_ethtool_ops;
817} 817}
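Replacing the bare -1s with SPEED_UNKNOWN/DUPLEX_UNKNOWN is purely cosmetic on the wire: the macros encode the same values the old code stored. A quick check of that equivalence, using a simplified stand-in for struct ethtool_cmd:

#include <stdio.h>

/* values as defined in include/uapi/linux/ethtool.h */
#define SPEED_UNKNOWN	-1
#define DUPLEX_UNKNOWN	0xff

/* simplified stand-in: the real struct splits speed into speed/speed_hi */
struct cmd { unsigned int speed; unsigned char duplex; };

int main(void)
{
	struct cmd ecmd;

	ecmd.speed = (unsigned int)SPEED_UNKNOWN;	/* 0xffffffff, as before */
	ecmd.duplex = DUPLEX_UNKNOWN;			/* (u8)-1 == 0xff */

	printf("speed=0x%x duplex=0x%x\n", ecmd.speed, ecmd.duplex);
	return 0;
}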
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de2793b06305..75467f83772c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86 86
87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 88MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
89MODULE_LICENSE("GPL"); 89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION); 90MODULE_VERSION(DRV_VERSION);
91 91
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b7b8d74c22d9..b151a949f352 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -42,6 +42,7 @@
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43#include <linux/in.h> 43#include <linux/in.h>
44#include <linux/ip.h> 44#include <linux/ip.h>
45#include <net/tso.h>
45#include <linux/tcp.h> 46#include <linux/tcp.h>
46#include <linux/udp.h> 47#include <linux/udp.h>
47#include <linux/etherdevice.h> 48#include <linux/etherdevice.h>
@@ -179,10 +180,18 @@ static char mv643xx_eth_driver_version[] = "1.4";
179 * Misc definitions. 180 * Misc definitions.
180 */ 181 */
181#define DEFAULT_RX_QUEUE_SIZE 128 182#define DEFAULT_RX_QUEUE_SIZE 128
182#define DEFAULT_TX_QUEUE_SIZE 256 183#define DEFAULT_TX_QUEUE_SIZE 512
183#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) 184#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
184 185
186#define TSO_HEADER_SIZE 128
185 187
188/* Max number of allowed TCP segments for software TSO */
189#define MV643XX_MAX_TSO_SEGS 100
190#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
191
192#define IS_TSO_HEADER(txq, addr) \
193 ((addr >= txq->tso_hdrs_dma) && \
194 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
186/* 195/*
187 * RX/TX descriptors. 196 * RX/TX descriptors.
188 */ 197 */
@@ -250,6 +259,7 @@ struct tx_desc {
250#define GEN_TCP_UDP_CHECKSUM 0x00020000 259#define GEN_TCP_UDP_CHECKSUM 0x00020000
251#define UDP_FRAME 0x00010000 260#define UDP_FRAME 0x00010000
252#define MAC_HDR_EXTRA_4_BYTES 0x00008000 261#define MAC_HDR_EXTRA_4_BYTES 0x00008000
262#define GEN_TCP_UDP_CHK_FULL 0x00000400
253#define MAC_HDR_EXTRA_8_BYTES 0x00000200 263#define MAC_HDR_EXTRA_8_BYTES 0x00000200
254 264
255#define TX_IHL_SHIFT 11 265#define TX_IHL_SHIFT 11
@@ -345,6 +355,12 @@ struct tx_queue {
345 int tx_curr_desc; 355 int tx_curr_desc;
346 int tx_used_desc; 356 int tx_used_desc;
347 357
358 int tx_stop_threshold;
359 int tx_wake_threshold;
360
361 char *tso_hdrs;
362 dma_addr_t tso_hdrs_dma;
363
348 struct tx_desc *tx_desc_area; 364 struct tx_desc *tx_desc_area;
349 dma_addr_t tx_desc_dma; 365 dma_addr_t tx_desc_dma;
350 int tx_desc_area_size; 366 int tx_desc_area_size;
@@ -491,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
491 507
492 if (netif_tx_queue_stopped(nq)) { 508 if (netif_tx_queue_stopped(nq)) {
493 __netif_tx_lock(nq, smp_processor_id()); 509 __netif_tx_lock(nq, smp_processor_id());
494 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) 510 if (txq->tx_desc_count <= txq->tx_wake_threshold)
495 netif_tx_wake_queue(nq); 511 netif_tx_wake_queue(nq);
496 __netif_tx_unlock(nq); 512 __netif_tx_unlock(nq);
497 } 513 }
@@ -661,6 +677,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
661 return 0; 677 return 0;
662} 678}
663 679
680static inline __be16 sum16_as_be(__sum16 sum)
681{
682 return (__force __be16)sum;
683}
684
685static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
686 u16 *l4i_chk, u32 *command, int length)
687{
688 int ret;
689 u32 cmd = 0;
690
691 if (skb->ip_summed == CHECKSUM_PARTIAL) {
692 int hdr_len;
693 int tag_bytes;
694
695 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
696 skb->protocol != htons(ETH_P_8021Q));
697
698 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
699 tag_bytes = hdr_len - ETH_HLEN;
700
701 if (length - hdr_len > mp->shared->tx_csum_limit ||
702 unlikely(tag_bytes & ~12)) {
703 ret = skb_checksum_help(skb);
704 if (!ret)
705 goto no_csum;
706 return ret;
707 }
708
709 if (tag_bytes & 4)
710 cmd |= MAC_HDR_EXTRA_4_BYTES;
711 if (tag_bytes & 8)
712 cmd |= MAC_HDR_EXTRA_8_BYTES;
713
714 cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
715 GEN_IP_V4_CHECKSUM |
716 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
717
718 /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
719 * it seems we don't need to pass the initial checksum. */
720 switch (ip_hdr(skb)->protocol) {
721 case IPPROTO_UDP:
722 cmd |= UDP_FRAME;
723 *l4i_chk = 0;
724 break;
725 case IPPROTO_TCP:
726 *l4i_chk = 0;
727 break;
728 default:
729 WARN(1, "protocol not supported");
730 }
731 } else {
732no_csum:
733 /* Errata BTS #50, IHL must be 5 if no HW checksum */
734 cmd |= 5 << TX_IHL_SHIFT;
735 }
736 *command = cmd;
737 return 0;
738}
739
740static inline int
741txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
742 struct sk_buff *skb, char *data, int length,
743 bool last_tcp, bool is_last)
744{
745 int tx_index;
746 u32 cmd_sts;
747 struct tx_desc *desc;
748
749 tx_index = txq->tx_curr_desc++;
750 if (txq->tx_curr_desc == txq->tx_ring_size)
751 txq->tx_curr_desc = 0;
752 desc = &txq->tx_desc_area[tx_index];
753
754 desc->l4i_chk = 0;
755 desc->byte_cnt = length;
756 desc->buf_ptr = dma_map_single(dev->dev.parent, data,
757 length, DMA_TO_DEVICE);
758 if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
759 WARN(1, "dma_map_single failed!\n");
760 return -ENOMEM;
761 }
762
763 cmd_sts = BUFFER_OWNED_BY_DMA;
764 if (last_tcp) {
765 /* last descriptor in the TCP packet */
766 cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
767 /* last descriptor in SKB */
768 if (is_last)
769 cmd_sts |= TX_ENABLE_INTERRUPT;
770 }
771 desc->cmd_sts = cmd_sts;
772 return 0;
773}
774
775static inline void
776txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
777{
778 struct mv643xx_eth_private *mp = txq_to_mp(txq);
779 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
780 int tx_index;
781 struct tx_desc *desc;
782 int ret;
783 u32 cmd_csum = 0;
784 u16 l4i_chk = 0;
785
786 tx_index = txq->tx_curr_desc;
787 desc = &txq->tx_desc_area[tx_index];
788
789 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
790 if (ret)
791 WARN(1, "failed to prepare checksum!");
792
793 /* Should we set this? Can't use the value from skb_tx_csum()
794 * as it's not the correct initial L4 checksum to use. */
795 desc->l4i_chk = 0;
796
797 desc->byte_cnt = hdr_len;
798 desc->buf_ptr = txq->tso_hdrs_dma +
799 txq->tx_curr_desc * TSO_HEADER_SIZE;
800 desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
801 GEN_CRC;
802
803 txq->tx_curr_desc++;
804 if (txq->tx_curr_desc == txq->tx_ring_size)
805 txq->tx_curr_desc = 0;
806}
807
808static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
809 struct net_device *dev)
810{
811 struct mv643xx_eth_private *mp = txq_to_mp(txq);
812 int total_len, data_left, ret;
813 int desc_count = 0;
814 struct tso_t tso;
815 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
816
817 /* Count needed descriptors */
818 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
819 netdev_dbg(dev, "not enough descriptors for TSO!\n");
820 return -EBUSY;
821 }
822
823 /* Initialize the TSO handler, and prepare the first payload */
824 tso_start(skb, &tso);
825
826 total_len = skb->len - hdr_len;
827 while (total_len > 0) {
828 char *hdr;
829
830 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
831 total_len -= data_left;
832 desc_count++;
833
834 /* prepare packet headers: MAC + IP + TCP */
835 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
836 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
837 txq_put_hdr_tso(skb, txq, data_left);
838
839 while (data_left > 0) {
840 int size;
841 desc_count++;
842
843 size = min_t(int, tso.size, data_left);
844 ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
845 size == data_left,
846 total_len == 0);
847 if (ret)
848 goto err_release;
849 data_left -= size;
850 tso_build_data(skb, &tso, size);
851 }
852 }
853
854 __skb_queue_tail(&txq->tx_skb, skb);
855 skb_tx_timestamp(skb);
856
857 /* clear TX_END status */
858 mp->work_tx_end &= ~(1 << txq->index);
859
860 /* ensure all descriptors are written before poking hardware */
861 wmb();
862 txq_enable(txq);
863 txq->tx_desc_count += desc_count;
864 return 0;
865err_release:
866 /* TODO: Release all used data descriptors; header descriptors must not
867 * be DMA-unmapped.
868 */
869 return ret;
870}
871
664static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 872static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
665{ 873{
666 struct mv643xx_eth_private *mp = txq_to_mp(txq); 874 struct mv643xx_eth_private *mp = txq_to_mp(txq);
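txq_submit_tso() above drives the new net/tso.h helpers: each gso_size slice of payload gets a rebuilt MAC/IP/TCP header in its own descriptor from the tso_hdrs area, followed by one or more payload descriptors. The outer-loop arithmetic, modeled standalone for a linear skb (sizes are sample values):

#include <stdio.h>

int main(void)
{
	int hdr_len = 66;	/* ETH(14) + IP(20) + TCP w/ options(32), sample */
	int payload = 9000;	/* hypothetical TCP payload size */
	int gso_size = 1448;	/* MSS */
	int total_len = payload;
	int seg = 0, descs = 0;

	while (total_len > 0) {
		int data_left = total_len < gso_size ? total_len : gso_size;

		total_len -= data_left;
		descs++;	/* header copy in its own descriptor */
		descs++;	/* one payload slice (the linear-skb case) */
		printf("segment %d: %d header + %d payload bytes\n",
		       seg++, hdr_len, data_left);
	}
	printf("%d segments, %d descriptors\n", seg, descs);
	return 0;
}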
@@ -671,8 +879,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
671 skb_frag_t *this_frag; 879 skb_frag_t *this_frag;
672 int tx_index; 880 int tx_index;
673 struct tx_desc *desc; 881 struct tx_desc *desc;
882 void *addr;
674 883
675 this_frag = &skb_shinfo(skb)->frags[frag]; 884 this_frag = &skb_shinfo(skb)->frags[frag];
885 addr = page_address(this_frag->page.p) + this_frag->page_offset;
676 tx_index = txq->tx_curr_desc++; 886 tx_index = txq->tx_curr_desc++;
677 if (txq->tx_curr_desc == txq->tx_ring_size) 887 if (txq->tx_curr_desc == txq->tx_ring_size)
678 txq->tx_curr_desc = 0; 888 txq->tx_curr_desc = 0;
@@ -692,19 +902,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
692 902
693 desc->l4i_chk = 0; 903 desc->l4i_chk = 0;
694 desc->byte_cnt = skb_frag_size(this_frag); 904 desc->byte_cnt = skb_frag_size(this_frag);
695 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, 905 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
696 this_frag, 0, 906 desc->byte_cnt, DMA_TO_DEVICE);
697 skb_frag_size(this_frag),
698 DMA_TO_DEVICE);
699 } 907 }
700} 908}
701 909
702static inline __be16 sum16_as_be(__sum16 sum) 910static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
703{ 911 struct net_device *dev)
704 return (__force __be16)sum;
705}
706
707static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
708{ 912{
709 struct mv643xx_eth_private *mp = txq_to_mp(txq); 913 struct mv643xx_eth_private *mp = txq_to_mp(txq);
710 int nr_frags = skb_shinfo(skb)->nr_frags; 914 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -712,54 +916,22 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
712 struct tx_desc *desc; 916 struct tx_desc *desc;
713 u32 cmd_sts; 917 u32 cmd_sts;
714 u16 l4i_chk; 918 u16 l4i_chk;
715 int length; 919 int length, ret;
716 920
717 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 921 cmd_sts = 0;
718 l4i_chk = 0; 922 l4i_chk = 0;
719 923
720 if (skb->ip_summed == CHECKSUM_PARTIAL) { 924 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
721 int hdr_len; 925 if (net_ratelimit())
722 int tag_bytes; 926 netdev_err(dev, "tx queue full?!\n");
723 927 return -EBUSY;
724 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
725 skb->protocol != htons(ETH_P_8021Q));
726
727 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
728 tag_bytes = hdr_len - ETH_HLEN;
729 if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
730 unlikely(tag_bytes & ~12)) {
731 if (skb_checksum_help(skb) == 0)
732 goto no_csum;
733 dev_kfree_skb_any(skb);
734 return 1;
735 }
736
737 if (tag_bytes & 4)
738 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
739 if (tag_bytes & 8)
740 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
741
742 cmd_sts |= GEN_TCP_UDP_CHECKSUM |
743 GEN_IP_V4_CHECKSUM |
744 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
745
746 switch (ip_hdr(skb)->protocol) {
747 case IPPROTO_UDP:
748 cmd_sts |= UDP_FRAME;
749 l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
750 break;
751 case IPPROTO_TCP:
752 l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
753 break;
754 default:
755 BUG();
756 }
757 } else {
758no_csum:
759 /* Errata BTS #50, IHL must be 5 if no HW checksum */
760 cmd_sts |= 5 << TX_IHL_SHIFT;
761 } 928 }
762 929
930 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
931 if (ret)
932 return ret;
933 cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
934
763 tx_index = txq->tx_curr_desc++; 935 tx_index = txq->tx_curr_desc++;
764 if (txq->tx_curr_desc == txq->tx_ring_size) 936 if (txq->tx_curr_desc == txq->tx_ring_size)
765 txq->tx_curr_desc = 0; 937 txq->tx_curr_desc = 0;
@@ -801,7 +973,7 @@ no_csum:
801static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 973static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
802{ 974{
803 struct mv643xx_eth_private *mp = netdev_priv(dev); 975 struct mv643xx_eth_private *mp = netdev_priv(dev);
804 int length, queue; 976 int length, queue, ret;
805 struct tx_queue *txq; 977 struct tx_queue *txq;
806 struct netdev_queue *nq; 978 struct netdev_queue *nq;
807 979
@@ -810,30 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
810 nq = netdev_get_tx_queue(dev, queue); 982 nq = netdev_get_tx_queue(dev, queue);
811 983
812 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 984 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
813 txq->tx_dropped++;
814 netdev_printk(KERN_DEBUG, dev, 985 netdev_printk(KERN_DEBUG, dev,
815 "failed to linearize skb with tiny unaligned fragment\n"); 986 "failed to linearize skb with tiny unaligned fragment\n");
816 return NETDEV_TX_BUSY; 987 return NETDEV_TX_BUSY;
817 } 988 }
818 989
819 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
820 if (net_ratelimit())
821 netdev_err(dev, "tx queue full?!\n");
822 dev_kfree_skb_any(skb);
823 return NETDEV_TX_OK;
824 }
825
826 length = skb->len; 990 length = skb->len;
827 991
828 if (!txq_submit_skb(txq, skb)) { 992 if (skb_is_gso(skb))
829 int entries_left; 993 ret = txq_submit_tso(txq, skb, dev);
830 994 else
995 ret = txq_submit_skb(txq, skb, dev);
996 if (!ret) {
831 txq->tx_bytes += length; 997 txq->tx_bytes += length;
832 txq->tx_packets++; 998 txq->tx_packets++;
833 999
834 entries_left = txq->tx_ring_size - txq->tx_desc_count; 1000 if (txq->tx_desc_count >= txq->tx_stop_threshold)
835 if (entries_left < MAX_SKB_FRAGS + 1)
836 netif_tx_stop_queue(nq); 1001 netif_tx_stop_queue(nq);
1002 } else {
1003 txq->tx_dropped++;
1004 dev_kfree_skb_any(skb);
837 } 1005 }
838 1006
839 return NETDEV_TX_OK; 1007 return NETDEV_TX_OK;
@@ -907,14 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
907 mp->dev->stats.tx_errors++; 1075 mp->dev->stats.tx_errors++;
908 } 1076 }
909 1077
910 if (cmd_sts & TX_FIRST_DESC) { 1078 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
911 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 1079 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
912 desc->byte_cnt, DMA_TO_DEVICE); 1080 desc->byte_cnt, DMA_TO_DEVICE);
913 } else {
914 dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
915 desc->byte_cnt, DMA_TO_DEVICE);
916 }
917
918 dev_kfree_skb(skb); 1081 dev_kfree_skb(skb);
919 } 1082 }
920 1083
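Because every TSO header lives inside the single coherent tso_hdrs allocation, txq_reclaim() must not dma_unmap_single() those addresses; the IS_TSO_HEADER() range check replaces the old first-descriptor test. The check in isolation:

#include <stdio.h>
#include <stdbool.h>

#define TSO_HEADER_SIZE 128

/* headers come from one coherent allocation; reclaim must skip
 * dma_unmap_single() for any address that falls inside it */
static bool is_tso_header(unsigned long addr, unsigned long hdrs_dma,
			  unsigned long ring_size)
{
	return addr >= hdrs_dma &&
	       addr < hdrs_dma + ring_size * TSO_HEADER_SIZE;
}

int main(void)
{
	unsigned long base = 0x10000;	/* pretend tso_hdrs_dma */

	printf("%d\n", is_tso_header(base + 64, base, 512));		/* 1 */
	printf("%d\n", is_tso_header(base + 512 * 128, base, 512));	/* 0 */
	return 0;
}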
@@ -1010,8 +1173,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1010 1173
1011 1174
1012/* mii management interface *************************************************/ 1175/* mii management interface *************************************************/
1013static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) 1176static void mv643xx_eth_adjust_link(struct net_device *dev)
1014{ 1177{
1178 struct mv643xx_eth_private *mp = netdev_priv(dev);
1015 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 1179 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1016 u32 autoneg_disable = FORCE_LINK_PASS | 1180 u32 autoneg_disable = FORCE_LINK_PASS |
1017 DISABLE_AUTO_NEG_SPEED_GMII | 1181 DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1387,7 +1551,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1387 1551
1388 ret = phy_ethtool_sset(mp->phy, cmd); 1552 ret = phy_ethtool_sset(mp->phy, cmd);
1389 if (!ret) 1553 if (!ret)
1390 mv643xx_adjust_pscr(mp); 1554 mv643xx_eth_adjust_link(dev);
1391 return ret; 1555 return ret;
1392} 1556}
1393 1557
@@ -1456,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1456 return -EINVAL; 1620 return -EINVAL;
1457 1621
1458 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1622 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1459 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; 1623 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1624 MV643XX_MAX_SKB_DESCS * 2, 4096);
1625 if (mp->tx_ring_size != er->tx_pending)
1626 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
1627 mp->tx_ring_size, er->tx_pending);
1460 1628
1461 if (netif_running(dev)) { 1629 if (netif_running(dev)) {
1462 mv643xx_eth_stop(dev); 1630 mv643xx_eth_stop(dev);
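set_ringparam now clamps the requested TX ring so a worst-case TSO skb always fits, warning when it adjusts the value; the same clamp is applied in set_params() below. A standalone sketch, with a simplified clamp_t and MAX_SKB_FRAGS assumed to be 17 (the usual 4 KiB-page value):

#include <stdio.h>

/* simplified stand-in for the kernel's clamp_t (evaluates args twice) */
#define clamp_t(type, val, lo, hi) \
	((type)(val) < (type)(lo) ? (type)(lo) : \
	 (type)(val) > (type)(hi) ? (type)(hi) : (type)(val))

#define MAX_SKB_FRAGS	17	/* assumed: usual value with 4 KiB pages */
#define MAX_TSO_SEGS	100
#define MAX_SKB_DESCS	(MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

int main(void)
{
	unsigned int requested = 64;	/* too small for a worst-case skb */
	unsigned int size = clamp_t(unsigned int, requested,
				    MAX_SKB_DESCS * 2, 4096);

	if (size != requested)
		printf("TX queue size set to %u (requested %u)\n",
		       size, requested);
	return 0;
}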
@@ -1832,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1832 2000
1833 txq->tx_ring_size = mp->tx_ring_size; 2001 txq->tx_ring_size = mp->tx_ring_size;
1834 2002
2003 /* A queue must always have room for at least one skb.
2004 * Therefore, stop the queue when the free entries reaches
2005 * the maximum number of descriptors per skb.
2006 */
2007 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2008 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2009
1835 txq->tx_desc_count = 0; 2010 txq->tx_desc_count = 0;
1836 txq->tx_curr_desc = 0; 2011 txq->tx_curr_desc = 0;
1837 txq->tx_used_desc = 0; 2012 txq->tx_used_desc = 0;
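The stop/wake thresholds replace the old MAX_SKB_FRAGS+1 test: the queue stops while there is still room for one maximally fragmented TSO skb and wakes only at half that occupancy, giving the wake path some hysteresis. The resulting numbers for the new 512-entry default ring (MAX_SKB_FRAGS again assumed to be 17):

#include <stdio.h>

#define MAX_SKB_FRAGS	17	/* assumed: usual value with 4 KiB pages */
#define MAX_TSO_SEGS	100
#define MAX_SKB_DESCS	(MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

int main(void)
{
	int ring = 512;				/* new DEFAULT_TX_QUEUE_SIZE */
	int stop = ring - MAX_SKB_DESCS;	/* stop with room for one skb */
	int wake = stop / 2;			/* hysteresis on wake-up */

	printf("stop at %d used descriptors, wake at %d\n", stop, wake);
	return 0;
}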
@@ -1871,6 +2046,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1871 nexti * sizeof(struct tx_desc); 2046 nexti * sizeof(struct tx_desc);
1872 } 2047 }
1873 2048
2049 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2050 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2051 txq->tx_ring_size * TSO_HEADER_SIZE,
2052 &txq->tso_hdrs_dma, GFP_KERNEL);
2053 if (txq->tso_hdrs == NULL) {
2054 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2055 txq->tx_desc_area, txq->tx_desc_dma);
2056 return -ENOMEM;
2057 }
1874 skb_queue_head_init(&txq->tx_skb); 2058 skb_queue_head_init(&txq->tx_skb);
1875 2059
1876 return 0; 2060 return 0;
@@ -1891,6 +2075,10 @@ static void txq_deinit(struct tx_queue *txq)
1891 else 2075 else
1892 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2076 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
1893 txq->tx_desc_area, txq->tx_desc_dma); 2077 txq->tx_desc_area, txq->tx_desc_dma);
2078 if (txq->tso_hdrs)
2079 dma_free_coherent(mp->dev->dev.parent,
2080 txq->tx_ring_size * TSO_HEADER_SIZE,
2081 txq->tso_hdrs, txq->tso_hdrs_dma);
1894} 2082}
1895 2083
1896 2084
@@ -2303,7 +2491,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2303 2491
2304 ret = phy_mii_ioctl(mp->phy, ifr, cmd); 2492 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2305 if (!ret) 2493 if (!ret)
2306 mv643xx_adjust_pscr(mp); 2494 mv643xx_eth_adjust_link(dev);
2307 return ret; 2495 return ret;
2308} 2496}
2309 2497
@@ -2678,6 +2866,7 @@ static void set_params(struct mv643xx_eth_private *mp,
2678 struct mv643xx_eth_platform_data *pd) 2866 struct mv643xx_eth_platform_data *pd)
2679{ 2867{
2680 struct net_device *dev = mp->dev; 2868 struct net_device *dev = mp->dev;
2869 unsigned int tx_ring_size;
2681 2870
2682 if (is_valid_ether_addr(pd->mac_addr)) 2871 if (is_valid_ether_addr(pd->mac_addr))
2683 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 2872 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2692,22 +2881,22 @@ static void set_params(struct mv643xx_eth_private *mp,
2692 2881
2693 mp->rxq_count = pd->rx_queue_count ? : 1; 2882 mp->rxq_count = pd->rx_queue_count ? : 1;
2694 2883
2695 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2884 tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2696 if (pd->tx_queue_size) 2885 if (pd->tx_queue_size)
2697 mp->tx_ring_size = pd->tx_queue_size; 2886 tx_ring_size = pd->tx_queue_size;
2887
2888 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2889 MV643XX_MAX_SKB_DESCS * 2, 4096);
2890 if (mp->tx_ring_size != tx_ring_size)
2891 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2892 mp->tx_ring_size, tx_ring_size);
2893
2698 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2894 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2699 mp->tx_desc_sram_size = pd->tx_sram_size; 2895 mp->tx_desc_sram_size = pd->tx_sram_size;
2700 2896
2701 mp->txq_count = pd->tx_queue_count ? : 1; 2897 mp->txq_count = pd->tx_queue_count ? : 1;
2702} 2898}
2703 2899
2704static void mv643xx_eth_adjust_link(struct net_device *dev)
2705{
2706 struct mv643xx_eth_private *mp = netdev_priv(dev);
2707
2708 mv643xx_adjust_pscr(mp);
2709}
2710
2711static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2900static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2712 int phy_addr) 2901 int phy_addr)
2713{ 2902{
@@ -2889,7 +3078,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2889 if (err) 3078 if (err)
2890 goto out; 3079 goto out;
2891 3080
2892 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 3081 dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
2893 3082
2894 init_pscr(mp, pd->speed, pd->duplex); 3083 init_pscr(mp, pd->speed, pd->duplex);
2895 3084
@@ -2921,11 +3110,14 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2921 dev->watchdog_timeo = 2 * HZ; 3110 dev->watchdog_timeo = 2 * HZ;
2922 dev->base_addr = 0; 3111 dev->base_addr = 0;
2923 3112
2924 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 3113 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2925 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 3114 dev->vlan_features = dev->features;
2926 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 3115
3116 dev->features |= NETIF_F_RXCSUM;
3117 dev->hw_features = dev->features;
2927 3118
2928 dev->priv_flags |= IFF_UNICAST_FLT; 3119 dev->priv_flags |= IFF_UNICAST_FLT;
3120 dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
2929 3121
2930 SET_NETDEV_DEV(dev, &pdev->dev); 3122 SET_NETDEV_DEV(dev, &pdev->dev);
2931 3123
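The probe hunk layers the flags instead of repeating them: SG, IP checksum and the new software TSO form the base set, which is copied to vlan_features before receive checksumming is added, and hw_features finally exposes the whole set as user-togglable; gso_max_segs tells the stack not to build GSO skbs with more segments than the TSO path reserves descriptors for. Roughly, as a sketch with an assumed segment cap:

#include <linux/netdevice.h>

#define EX_MAX_TSO_SEGS	100	/* mirrors MV643XX_MAX_TSO_SEGS */

static void ex_setup_features(struct net_device *dev)
{
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->vlan_features = dev->features;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features = dev->features;	/* user-togglable set */

	dev->gso_max_segs = EX_MAX_TSO_SEGS;	/* cap GSO segment count */
}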
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9d5ced263a5e..fc2fb25343f4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
195 return -ENODEV; 195 return -ENODEV;
196 } 196 }
197 197
198 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev)); 198 bus = devm_mdiobus_alloc_size(&pdev->dev,
199 if (!bus) { 199 sizeof(struct orion_mdio_dev));
200 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n"); 200 if (!bus)
201 return -ENOMEM; 201 return -ENOMEM;
202 }
203 202
204 bus->name = "orion_mdio_bus"; 203 bus->name = "orion_mdio_bus";
205 bus->read = orion_mdio_read; 204 bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
208 dev_name(&pdev->dev)); 207 dev_name(&pdev->dev));
209 bus->parent = &pdev->dev; 208 bus->parent = &pdev->dev;
210 209
211 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 210 bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
212 if (!bus->irq) { 211 GFP_KERNEL);
213 mdiobus_free(bus); 212 if (!bus->irq)
214 return -ENOMEM; 213 return -ENOMEM;
215 }
216 214
217 for (i = 0; i < PHY_MAX_ADDR; i++) 215 for (i = 0; i < PHY_MAX_ADDR; i++)
218 bus->irq[i] = PHY_POLL; 216 bus->irq[i] = PHY_POLL;
@@ -264,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
264out_mdio: 262out_mdio:
265 if (!IS_ERR(dev->clk)) 263 if (!IS_ERR(dev->clk))
266 clk_disable_unprepare(dev->clk); 264 clk_disable_unprepare(dev->clk);
267 kfree(bus->irq);
268 mdiobus_free(bus);
269 return ret; 265 return ret;
270} 266}
271 267
@@ -276,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
276 272
277 writel(0, dev->regs + MVMDIO_ERR_INT_MASK); 273 writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
278 mdiobus_unregister(bus); 274 mdiobus_unregister(bus);
279 kfree(bus->irq);
280 mdiobus_free(bus);
281 if (!IS_ERR(dev->clk)) 275 if (!IS_ERR(dev->clk))
282 clk_disable_unprepare(dev->clk); 276 clk_disable_unprepare(dev->clk);
283 277
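The mvmdio conversion is why the kfree(bus->irq)/mdiobus_free(bus) pairs vanish from both the error path and remove(): devm_mdiobus_alloc_size() and devm_kmalloc_array() tie the allocations to the device, so they are released automatically on probe failure or unbind. The resulting probe shape, sketched generically:

#include <linux/platform_device.h>
#include <linux/phy.h>

static int example_probe(struct platform_device *pdev)
{
	int *irqs, i;

	irqs = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
				  GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;		/* nothing to unwind by hand */

	for (i = 0; i < PHY_MAX_ADDR; i++)
		irqs[i] = PHY_POLL;

	/* On any later failure a plain return suffices: devm memory is
	 * freed when probe fails or the device is removed.
	 */
	return 0;
}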
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 14786c8bf99e..45beca17fa50 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -23,6 +23,7 @@
23#include <net/ip.h> 23#include <net/ip.h>
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <net/tso.h>
26#include <linux/of.h> 27#include <linux/of.h>
27#include <linux/of_irq.h> 28#include <linux/of_irq.h>
28#include <linux/of_mdio.h> 29#include <linux/of_mdio.h>
@@ -218,9 +219,6 @@
218#define MVNETA_RX_COAL_PKTS 32 219#define MVNETA_RX_COAL_PKTS 32
219#define MVNETA_RX_COAL_USEC 100 220#define MVNETA_RX_COAL_USEC 100
220 221
221/* Napi polling weight */
222#define MVNETA_RX_POLL_WEIGHT 64
223
224/* The two bytes Marvell header. Either contains a special value used 222/* The two bytes Marvell header. Either contains a special value used
225 * by Marvell switches when a specific hardware mode is enabled (not 223 * by Marvell switches when a specific hardware mode is enabled (not
226 * supported by this driver) or is filled automatically by zeroes on 224 * supported by this driver) or is filled automatically by zeroes on
@@ -244,12 +242,20 @@
244 242
245#define MVNETA_TX_MTU_MAX 0x3ffff 243#define MVNETA_TX_MTU_MAX 0x3ffff
246 244
245/* TSO header size */
246#define TSO_HEADER_SIZE 128
247
247/* Max number of Rx descriptors */ 248/* Max number of Rx descriptors */
248#define MVNETA_MAX_RXD 128 249#define MVNETA_MAX_RXD 128
249 250
250/* Max number of Tx descriptors */ 251/* Max number of Tx descriptors */
251#define MVNETA_MAX_TXD 532 252#define MVNETA_MAX_TXD 532
252 253
254/* Max number of allowed TCP segments for software TSO */
255#define MVNETA_MAX_TSO_SEGS 100
256
257#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
258
253/* descriptor aligned size */ 259/* descriptor aligned size */
254#define MVNETA_DESC_ALIGNED_SIZE 32 260#define MVNETA_DESC_ALIGNED_SIZE 32
255 261
@@ -258,6 +264,10 @@
258 ETH_HLEN + ETH_FCS_LEN, \ 264 ETH_HLEN + ETH_FCS_LEN, \
259 MVNETA_CPU_D_CACHE_LINE_SIZE) 265 MVNETA_CPU_D_CACHE_LINE_SIZE)
260 266
267#define IS_TSO_HEADER(txq, addr) \
268 ((addr >= txq->tso_hdrs_phys) && \
269 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
270
261#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 271#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
262 272
263struct mvneta_pcpu_stats { 273struct mvneta_pcpu_stats {
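IS_TSO_HEADER() classifies a descriptor's buffer address: anything inside the coherent header pool was never dma_map'd and must not be unmapped, while everything else is a streaming mapping. A generic form of the guard it enables (illustrative, not the driver's code):

#include <linux/dma-mapping.h>

/* 'pool_dma' and 'pool_size' describe the coherent header pool;
 * any other address is a streaming mapping that must be unmapped.
 */
static void ex_release_buf(struct device *dev, dma_addr_t addr, size_t len,
			   dma_addr_t pool_dma, size_t pool_size)
{
	bool is_tso_header = addr >= pool_dma && addr < pool_dma + pool_size;

	if (!is_tso_header)
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}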
@@ -279,9 +289,6 @@ struct mvneta_port {
279 u32 cause_rx_tx; 289 u32 cause_rx_tx;
280 struct napi_struct napi; 290 struct napi_struct napi;
281 291
282 /* Napi weight */
283 int weight;
284
285 /* Core clock */ 292 /* Core clock */
286 struct clk *clk; 293 struct clk *clk;
287 u8 mcast_count[256]; 294 u8 mcast_count[256];
@@ -390,6 +397,8 @@ struct mvneta_tx_queue {
390 * descriptor ring 397 * descriptor ring
391 */ 398 */
392 int count; 399 int count;
400 int tx_stop_threshold;
401 int tx_wake_threshold;
393 402
394 /* Array of transmitted skb */ 403 /* Array of transmitted skb */
395 struct sk_buff **tx_skb; 404 struct sk_buff **tx_skb;
@@ -413,6 +422,12 @@ struct mvneta_tx_queue {
413 422
414 /* Index of the next TX DMA descriptor to process */ 423 /* Index of the next TX DMA descriptor to process */
415 int next_desc_to_proc; 424 int next_desc_to_proc;
425
426 /* DMA buffers for TSO headers */
427 char *tso_hdrs;
428
429 /* DMA address of TSO headers */
430 dma_addr_t tso_hdrs_phys;
416}; 431};
417 432
418struct mvneta_rx_queue { 433struct mvneta_rx_queue {
@@ -441,7 +456,10 @@ struct mvneta_rx_queue {
441 int next_desc_to_proc; 456 int next_desc_to_proc;
442}; 457};
443 458
444static int rxq_number = 8; 459/* The hardware supports eight (8) rx queues, but we are only allowing
460 * the first one to be used. Therefore, let's just allocate one queue.
461 */
462static int rxq_number = 1;
445static int txq_number = 8; 463static int txq_number = 8;
446 464
447static int rxq_def; 465static int rxq_def;
@@ -1277,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1277 1295
1278 mvneta_txq_inc_get(txq); 1296 mvneta_txq_inc_get(txq);
1279 1297
1298 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1299 dma_unmap_single(pp->dev->dev.parent,
1300 tx_desc->buf_phys_addr,
1301 tx_desc->data_size, DMA_TO_DEVICE);
1280 if (!skb) 1302 if (!skb)
1281 continue; 1303 continue;
1282
1283 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1284 tx_desc->data_size, DMA_TO_DEVICE);
1285 dev_kfree_skb_any(skb); 1304 dev_kfree_skb_any(skb);
1286 } 1305 }
1287} 1306}
@@ -1302,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
1302 txq->count -= tx_done; 1321 txq->count -= tx_done;
1303 1322
1304 if (netif_tx_queue_stopped(nq)) { 1323 if (netif_tx_queue_stopped(nq)) {
1305 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1) 1324 if (txq->count <= txq->tx_wake_threshold)
1306 netif_tx_wake_queue(nq); 1325 netif_tx_wake_queue(nq);
1307 } 1326 }
1308} 1327}
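Replacing the old "free entries >= MAX_SKB_FRAGS + 1" wake test with tx_wake_threshold gives the queue hysteresis: transmit stops once count reaches tx_stop_threshold (so one worst-case skb still fits), and the queue wakes only after the ring drains to half of that point, which avoids rapid stop/wake flapping under load. Sketched with a reduced queue structure:

#include <linux/netdevice.h>

struct ex_txq {
	int count;			/* descriptors in flight */
	int tx_stop_threshold;		/* leave room for one worst-case skb */
	int tx_wake_threshold;		/* wake only after a real drain */
};

static void ex_after_xmit(struct ex_txq *q, struct netdev_queue *nq)
{
	if (q->count >= q->tx_stop_threshold)
		netif_tx_stop_queue(nq);
}

static void ex_after_completion(struct ex_txq *q, struct netdev_queue *nq)
{
	if (netif_tx_queue_stopped(nq) && q->count <= q->tx_wake_threshold)
		netif_tx_wake_queue(nq);
}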
@@ -1519,14 +1538,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1519 return rx_done; 1538 return rx_done;
1520} 1539}
1521 1540
1541static inline void
1542mvneta_tso_put_hdr(struct sk_buff *skb,
1543 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1544{
1545 struct mvneta_tx_desc *tx_desc;
1546 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1547
1548 txq->tx_skb[txq->txq_put_index] = NULL;
1549 tx_desc = mvneta_txq_next_desc_get(txq);
1550 tx_desc->data_size = hdr_len;
1551 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1552 tx_desc->command |= MVNETA_TXD_F_DESC;
1553 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1554 txq->txq_put_index * TSO_HEADER_SIZE;
1555 mvneta_txq_inc_put(txq);
1556}
1557
1558static inline int
1559mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1560 struct sk_buff *skb, char *data, int size,
1561 bool last_tcp, bool is_last)
1562{
1563 struct mvneta_tx_desc *tx_desc;
1564
1565 tx_desc = mvneta_txq_next_desc_get(txq);
1566 tx_desc->data_size = size;
1567 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1568 size, DMA_TO_DEVICE);
1569 if (unlikely(dma_mapping_error(dev->dev.parent,
1570 tx_desc->buf_phys_addr))) {
1571 mvneta_txq_desc_put(txq);
1572 return -ENOMEM;
1573 }
1574
1575 tx_desc->command = 0;
1576 txq->tx_skb[txq->txq_put_index] = NULL;
1577
1578 if (last_tcp) {
1579 /* last descriptor in the TCP packet */
1580 tx_desc->command = MVNETA_TXD_L_DESC;
1581
1582 /* last descriptor in SKB */
1583 if (is_last)
1584 txq->tx_skb[txq->txq_put_index] = skb;
1585 }
1586 mvneta_txq_inc_put(txq);
1587 return 0;
1588}
1589
1590static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1591 struct mvneta_tx_queue *txq)
1592{
1593 int total_len, data_left;
1594 int desc_count = 0;
1595 struct mvneta_port *pp = netdev_priv(dev);
1596 struct tso_t tso;
1597 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1598 int i;
1599
1600 /* Count needed descriptors */
1601 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1602 return 0;
1603
1604 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1605 pr_info("*** Is this even possible???!?!?\n");
1606 return 0;
1607 }
1608
1609 /* Initialize the TSO handler, and prepare the first payload */
1610 tso_start(skb, &tso);
1611
1612 total_len = skb->len - hdr_len;
1613 while (total_len > 0) {
1614 char *hdr;
1615
1616 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1617 total_len -= data_left;
1618 desc_count++;
1619
1620 /* prepare packet headers: MAC + IP + TCP */
1621 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1622 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1623
1624 mvneta_tso_put_hdr(skb, pp, txq);
1625
1626 while (data_left > 0) {
1627 int size;
1628 desc_count++;
1629
1630 size = min_t(int, tso.size, data_left);
1631
1632 if (mvneta_tso_put_data(dev, txq, skb,
1633 tso.data, size,
1634 size == data_left,
1635 total_len == 0))
1636 goto err_release;
1637 data_left -= size;
1638
1639 tso_build_data(skb, &tso, size);
1640 }
1641 }
1642
1643 return desc_count;
1644
1645err_release:
1646 /* Release all used data descriptors; header descriptors must not
1647 * be DMA-unmapped.
1648 */
1649 for (i = desc_count - 1; i >= 0; i--) {
1650 struct mvneta_tx_desc *tx_desc = txq->descs + i;
1651 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1652 dma_unmap_single(pp->dev->dev.parent,
1653 tx_desc->buf_phys_addr,
1654 tx_desc->data_size,
1655 DMA_TO_DEVICE);
1656 mvneta_txq_desc_put(txq);
1657 }
1658 return 0;
1659}
1660
1522/* Handle tx fragmentation processing */ 1661/* Handle tx fragmentation processing */
1523static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 1662static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1524 struct mvneta_tx_queue *txq) 1663 struct mvneta_tx_queue *txq)
1525{ 1664{
1526 struct mvneta_tx_desc *tx_desc; 1665 struct mvneta_tx_desc *tx_desc;
1527 int i; 1666 int i, nr_frags = skb_shinfo(skb)->nr_frags;
1528 1667
1529 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1668 for (i = 0; i < nr_frags; i++) {
1530 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1669 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1531 void *addr = page_address(frag->page.p) + frag->page_offset; 1670 void *addr = page_address(frag->page.p) + frag->page_offset;
1532 1671
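mvneta_tx_tso() above is a direct client of the net/tso.h helpers added earlier in this series: tso_count_descs() sizes the reservation, tso_start() primes the iterator, tso_build_hdr() writes a per-segment header into a preallocated slot, and tso_build_data() advances the payload cursor. Stripped of the descriptor bookkeeping, the expected calling pattern is roughly this (ex_emit_* are hypothetical stand-ins for descriptor writes):

#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tso.h>

static void ex_tso_walk(struct sk_buff *skb, char *hdr_slot)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);
	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size,
				      total_len);

		total_len -= data_left;
		tso_build_hdr(skb, hdr_slot, &tso, data_left,
			      total_len == 0);
		/* ex_emit_hdr(hdr_slot, hdr_len); */

		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			/* ex_emit_data(tso.data, size); */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}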
@@ -1543,20 +1682,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1543 goto error; 1682 goto error;
1544 } 1683 }
1545 1684
1546 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 1685 if (i == nr_frags - 1) {
1547 /* Last descriptor */ 1686 /* Last descriptor */
1548 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 1687 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1549
1550 txq->tx_skb[txq->txq_put_index] = skb; 1688 txq->tx_skb[txq->txq_put_index] = skb;
1551
1552 mvneta_txq_inc_put(txq);
1553 } else { 1689 } else {
1554 /* Descriptor in the middle: Not First, Not Last */ 1690 /* Descriptor in the middle: Not First, Not Last */
1555 tx_desc->command = 0; 1691 tx_desc->command = 0;
1556
1557 txq->tx_skb[txq->txq_put_index] = NULL; 1692 txq->tx_skb[txq->txq_put_index] = NULL;
1558 mvneta_txq_inc_put(txq);
1559 } 1693 }
1694 mvneta_txq_inc_put(txq);
1560 } 1695 }
1561 1696
1562 return 0; 1697 return 0;
@@ -1584,15 +1719,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1584 u16 txq_id = skb_get_queue_mapping(skb); 1719 u16 txq_id = skb_get_queue_mapping(skb);
1585 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 1720 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1586 struct mvneta_tx_desc *tx_desc; 1721 struct mvneta_tx_desc *tx_desc;
1587 struct netdev_queue *nq;
1588 int frags = 0; 1722 int frags = 0;
1589 u32 tx_cmd; 1723 u32 tx_cmd;
1590 1724
1591 if (!netif_running(dev)) 1725 if (!netif_running(dev))
1592 goto out; 1726 goto out;
1593 1727
1728 if (skb_is_gso(skb)) {
1729 frags = mvneta_tx_tso(skb, dev, txq);
1730 goto out;
1731 }
1732
1594 frags = skb_shinfo(skb)->nr_frags + 1; 1733 frags = skb_shinfo(skb)->nr_frags + 1;
1595 nq = netdev_get_tx_queue(dev, txq_id);
1596 1734
1597 /* Get a descriptor for the first part of the packet */ 1735 /* Get a descriptor for the first part of the packet */
1598 tx_desc = mvneta_txq_next_desc_get(txq); 1736 tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1773,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1635 } 1773 }
1636 } 1774 }
1637 1775
1638 txq->count += frags;
1639 mvneta_txq_pend_desc_add(pp, txq, frags);
1640
1641 if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
1642 netif_tx_stop_queue(nq);
1643
1644out: 1776out:
1645 if (frags > 0) { 1777 if (frags > 0) {
1646 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1778 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1779 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1780
1781 txq->count += frags;
1782 mvneta_txq_pend_desc_add(pp, txq, frags);
1783
1784 if (txq->count >= txq->tx_stop_threshold)
1785 netif_tx_stop_queue(nq);
1647 1786
1648 u64_stats_update_begin(&stats->syncp); 1787 u64_stats_update_begin(&stats->syncp);
1649 stats->tx_packets++; 1788 stats->tx_packets++;
@@ -2003,7 +2142,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
2003{ 2142{
2004 int queue; 2143 int queue;
2005 2144
2006 /* free the skb's in the hal tx ring */ 2145 /* free the skb's in the tx ring */
2007 for (queue = 0; queue < txq_number; queue++) 2146 for (queue = 0; queue < txq_number; queue++)
2008 mvneta_txq_done_force(pp, &pp->txqs[queue]); 2147 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2009 2148
@@ -2081,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2081{ 2220{
2082 txq->size = pp->tx_ring_size; 2221 txq->size = pp->tx_ring_size;
2083 2222
2223 /* A queue must always have room for at least one skb.
 2224 * Therefore, stop the queue when the number of free entries reaches
2225 * the maximum number of descriptors per skb.
2226 */
2227 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2228 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2229
2230
2084 /* Allocate memory for TX descriptors */ 2231 /* Allocate memory for TX descriptors */
2085 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2232 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2086 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2233 txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2109,6 +2256,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2109 txq->descs, txq->descs_phys); 2256 txq->descs, txq->descs_phys);
2110 return -ENOMEM; 2257 return -ENOMEM;
2111 } 2258 }
2259
2260 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2261 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2262 txq->size * TSO_HEADER_SIZE,
2263 &txq->tso_hdrs_phys, GFP_KERNEL);
2264 if (txq->tso_hdrs == NULL) {
2265 kfree(txq->tx_skb);
2266 dma_free_coherent(pp->dev->dev.parent,
2267 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2268 txq->descs, txq->descs_phys);
2269 return -ENOMEM;
2270 }
2112 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 2271 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2113 2272
2114 return 0; 2273 return 0;
@@ -2120,6 +2279,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
2120{ 2279{
2121 kfree(txq->tx_skb); 2280 kfree(txq->tx_skb);
2122 2281
2282 if (txq->tso_hdrs)
2283 dma_free_coherent(pp->dev->dev.parent,
2284 txq->size * TSO_HEADER_SIZE,
2285 txq->tso_hdrs, txq->tso_hdrs_phys);
2123 if (txq->descs) 2286 if (txq->descs)
2124 dma_free_coherent(pp->dev->dev.parent, 2287 dma_free_coherent(pp->dev->dev.parent,
2125 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2288 txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2442,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
2279 return 0; 2442 return 0;
2280 2443
2281 /* The interface is running, so we have to force a 2444 /* The interface is running, so we have to force a
2282 * reallocation of the RXQs 2445 * reallocation of the queues
2283 */ 2446 */
2284 mvneta_stop_dev(pp); 2447 mvneta_stop_dev(pp);
2285 2448
2286 mvneta_cleanup_txqs(pp); 2449 mvneta_cleanup_txqs(pp);
2287 mvneta_cleanup_rxqs(pp); 2450 mvneta_cleanup_rxqs(pp);
2288 2451
2289 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2452 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
2290 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2453 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2291 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2454 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2292 2455
2293 ret = mvneta_setup_rxqs(pp); 2456 ret = mvneta_setup_rxqs(pp);
2294 if (ret) { 2457 if (ret) {
2295 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n"); 2458 netdev_err(dev, "unable to setup rxqs after MTU change\n");
2296 return ret; 2459 return ret;
2297 } 2460 }
2298 2461
2299 mvneta_setup_txqs(pp); 2462 ret = mvneta_setup_txqs(pp);
2463 if (ret) {
2464 netdev_err(dev, "unable to setup txqs after MTU change\n");
2465 return ret;
2466 }
2300 2467
2301 mvneta_start_dev(pp); 2468 mvneta_start_dev(pp);
2302 mvneta_port_up(pp); 2469 mvneta_port_up(pp);
@@ -2323,22 +2490,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2323static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 2490static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2324{ 2491{
2325 struct mvneta_port *pp = netdev_priv(dev); 2492 struct mvneta_port *pp = netdev_priv(dev);
2326 u8 *mac = addr + 2; 2493 struct sockaddr *sockaddr = addr;
2327 int i; 2494 int ret;
2328
2329 if (netif_running(dev))
2330 return -EBUSY;
2331 2495
2496 ret = eth_prepare_mac_addr_change(dev, addr);
2497 if (ret < 0)
2498 return ret;
2332 /* Remove previous address table entry */ 2499 /* Remove previous address table entry */
2333 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 2500 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2334 2501
2335 /* Set new addr in hw */ 2502 /* Set new addr in hw */
2336 mvneta_mac_addr_set(pp, mac, rxq_def); 2503 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
2337
2338 /* Set addr in the device */
2339 for (i = 0; i < ETH_ALEN; i++)
2340 dev->dev_addr[i] = mac[i];
2341 2504
2505 eth_commit_mac_addr_change(dev, addr);
2342 return 0; 2506 return 0;
2343} 2507}
2344 2508
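The rewrite also drops the open-coded sockaddr parsing and the netif_running() bail-out in favor of the generic eth_prepare_mac_addr_change()/eth_commit_mac_addr_change() pair, which validate the new address and then update dev->dev_addr. A .ndo_set_mac_address built on them looks roughly like this (ex_hw_set_mac() is a hypothetical hardware hook):

#include <linux/etherdevice.h>

static void ex_hw_set_mac(struct net_device *dev, const u8 *mac)
{
	/* hypothetical: program the filter/address registers */
}

static int ex_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;

	ex_hw_set_mac(dev, sa->sa_data);
	eth_commit_mac_addr_change(dev, addr);	/* copies into dev_addr */
	return 0;
}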
@@ -2433,8 +2597,6 @@ static int mvneta_open(struct net_device *dev)
2433 struct mvneta_port *pp = netdev_priv(dev); 2597 struct mvneta_port *pp = netdev_priv(dev);
2434 int ret; 2598 int ret;
2435 2599
2436 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2437
2438 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2600 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2439 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2601 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2440 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2602 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2600,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2600 return -EINVAL; 2762 return -EINVAL;
2601 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 2763 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2602 ring->rx_pending : MVNETA_MAX_RXD; 2764 ring->rx_pending : MVNETA_MAX_RXD;
2603 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ? 2765
2604 ring->tx_pending : MVNETA_MAX_TXD; 2766 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
2767 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
2768 if (pp->tx_ring_size != ring->tx_pending)
2769 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2770 pp->tx_ring_size, ring->tx_pending);
2605 2771
2606 if (netif_running(dev)) { 2772 if (netif_running(dev)) {
2607 mvneta_stop(dev); 2773 mvneta_stop(dev);
@@ -2638,7 +2804,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
2638}; 2804};
2639 2805
2640/* Initialize hw */ 2806/* Initialize hw */
2641static int mvneta_init(struct mvneta_port *pp, int phy_addr) 2807static int mvneta_init(struct device *dev, struct mvneta_port *pp)
2642{ 2808{
2643 int queue; 2809 int queue;
2644 2810
@@ -2648,8 +2814,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2648 /* Set port default values */ 2814 /* Set port default values */
2649 mvneta_defaults_set(pp); 2815 mvneta_defaults_set(pp);
2650 2816
2651 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue), 2817 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
2652 GFP_KERNEL); 2818 GFP_KERNEL);
2653 if (!pp->txqs) 2819 if (!pp->txqs)
2654 return -ENOMEM; 2820 return -ENOMEM;
2655 2821
@@ -2661,12 +2827,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2661 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 2827 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2662 } 2828 }
2663 2829
2664 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue), 2830 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
2665 GFP_KERNEL); 2831 GFP_KERNEL);
2666 if (!pp->rxqs) { 2832 if (!pp->rxqs)
2667 kfree(pp->txqs);
2668 return -ENOMEM; 2833 return -ENOMEM;
2669 }
2670 2834
2671 /* Create Rx descriptor rings */ 2835 /* Create Rx descriptor rings */
2672 for (queue = 0; queue < rxq_number; queue++) { 2836 for (queue = 0; queue < rxq_number; queue++) {
@@ -2680,12 +2844,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2680 return 0; 2844 return 0;
2681} 2845}
2682 2846
2683static void mvneta_deinit(struct mvneta_port *pp)
2684{
2685 kfree(pp->txqs);
2686 kfree(pp->rxqs);
2687}
2688
2689/* platform glue : initialize decoding windows */ 2847/* platform glue : initialize decoding windows */
2690static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 2848static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2691 const struct mbus_dram_target_info *dram) 2849 const struct mbus_dram_target_info *dram)
@@ -2768,7 +2926,6 @@ static int mvneta_probe(struct platform_device *pdev)
2768 struct resource *res; 2926 struct resource *res;
2769 struct device_node *dn = pdev->dev.of_node; 2927 struct device_node *dn = pdev->dev.of_node;
2770 struct device_node *phy_node; 2928 struct device_node *phy_node;
2771 u32 phy_addr;
2772 struct mvneta_port *pp; 2929 struct mvneta_port *pp;
2773 struct net_device *dev; 2930 struct net_device *dev;
2774 const char *dt_mac_addr; 2931 const char *dt_mac_addr;
@@ -2797,9 +2954,22 @@ static int mvneta_probe(struct platform_device *pdev)
2797 2954
2798 phy_node = of_parse_phandle(dn, "phy", 0); 2955 phy_node = of_parse_phandle(dn, "phy", 0);
2799 if (!phy_node) { 2956 if (!phy_node) {
2800 dev_err(&pdev->dev, "no associated PHY\n"); 2957 if (!of_phy_is_fixed_link(dn)) {
2801 err = -ENODEV; 2958 dev_err(&pdev->dev, "no PHY specified\n");
2802 goto err_free_irq; 2959 err = -ENODEV;
2960 goto err_free_irq;
2961 }
2962
2963 err = of_phy_register_fixed_link(dn);
2964 if (err < 0) {
2965 dev_err(&pdev->dev, "cannot register fixed PHY\n");
2966 goto err_free_irq;
2967 }
2968
2969 /* In the case of a fixed PHY, the DT node associated
2970 * to the PHY is the Ethernet MAC DT node.
2971 */
2972 phy_node = dn;
2803 } 2973 }
2804 2974
2805 phy_mode = of_get_phy_mode(dn); 2975 phy_mode = of_get_phy_mode(dn);
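This is the standard OF fixed-link fallback: when the node has no "phy" phandle but describes a fixed link, a software PHY is registered and, by convention, the MAC's own node then stands in as the PHY node. Condensed into a sketch:

#include <linux/of.h>
#include <linux/of_mdio.h>

static struct device_node *ex_get_phy_node(struct device_node *dn)
{
	struct device_node *phy_node = of_parse_phandle(dn, "phy", 0);

	if (phy_node)
		return phy_node;

	if (!of_phy_is_fixed_link(dn))
		return NULL;			/* no PHY at all */

	if (of_phy_register_fixed_link(dn) < 0)
		return NULL;

	/* For a fixed link, the MAC node itself is the PHY node. */
	return dn;
}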
@@ -2813,11 +2983,9 @@ static int mvneta_probe(struct platform_device *pdev)
2813 dev->watchdog_timeo = 5 * HZ; 2983 dev->watchdog_timeo = 5 * HZ;
2814 dev->netdev_ops = &mvneta_netdev_ops; 2984 dev->netdev_ops = &mvneta_netdev_ops;
2815 2985
2816 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops); 2986 dev->ethtool_ops = &mvneta_eth_tool_ops;
2817 2987
2818 pp = netdev_priv(dev); 2988 pp = netdev_priv(dev);
2819
2820 pp->weight = MVNETA_RX_POLL_WEIGHT;
2821 pp->phy_node = phy_node; 2989 pp->phy_node = phy_node;
2822 pp->phy_interface = phy_mode; 2990 pp->phy_interface = phy_mode;
2823 2991
@@ -2864,33 +3032,32 @@ static int mvneta_probe(struct platform_device *pdev)
2864 pp->dev = dev; 3032 pp->dev = dev;
2865 SET_NETDEV_DEV(dev, &pdev->dev); 3033 SET_NETDEV_DEV(dev, &pdev->dev);
2866 3034
2867 err = mvneta_init(pp, phy_addr); 3035 err = mvneta_init(&pdev->dev, pp);
2868 if (err < 0) { 3036 if (err < 0)
2869 dev_err(&pdev->dev, "can't init eth hal\n");
2870 goto err_free_stats; 3037 goto err_free_stats;
2871 }
2872 3038
2873 err = mvneta_port_power_up(pp, phy_mode); 3039 err = mvneta_port_power_up(pp, phy_mode);
2874 if (err < 0) { 3040 if (err < 0) {
2875 dev_err(&pdev->dev, "can't power up port\n"); 3041 dev_err(&pdev->dev, "can't power up port\n");
2876 goto err_deinit; 3042 goto err_free_stats;
2877 } 3043 }
2878 3044
2879 dram_target_info = mv_mbus_dram_info(); 3045 dram_target_info = mv_mbus_dram_info();
2880 if (dram_target_info) 3046 if (dram_target_info)
2881 mvneta_conf_mbus_windows(pp, dram_target_info); 3047 mvneta_conf_mbus_windows(pp, dram_target_info);
2882 3048
2883 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); 3049 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
2884 3050
2885 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 3051 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2886 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 3052 dev->hw_features |= dev->features;
2887 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 3053 dev->vlan_features |= dev->features;
2888 dev->priv_flags |= IFF_UNICAST_FLT; 3054 dev->priv_flags |= IFF_UNICAST_FLT;
3055 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
2889 3056
2890 err = register_netdev(dev); 3057 err = register_netdev(dev);
2891 if (err < 0) { 3058 if (err < 0) {
2892 dev_err(&pdev->dev, "failed to register\n"); 3059 dev_err(&pdev->dev, "failed to register\n");
2893 goto err_deinit; 3060 goto err_free_stats;
2894 } 3061 }
2895 3062
2896 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 3063 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -2900,8 +3067,6 @@ static int mvneta_probe(struct platform_device *pdev)
2900 3067
2901 return 0; 3068 return 0;
2902 3069
2903err_deinit:
2904 mvneta_deinit(pp);
2905err_free_stats: 3070err_free_stats:
2906 free_percpu(pp->stats); 3071 free_percpu(pp->stats);
2907err_clk: 3072err_clk:
@@ -2920,7 +3085,6 @@ static int mvneta_remove(struct platform_device *pdev)
2920 struct mvneta_port *pp = netdev_priv(dev); 3085 struct mvneta_port *pp = netdev_priv(dev);
2921 3086
2922 unregister_netdev(dev); 3087 unregister_netdev(dev);
2923 mvneta_deinit(pp);
2924 clk_disable_unprepare(pp->clk); 3088 clk_disable_unprepare(pp->clk);
2925 free_percpu(pp->stats); 3089 free_percpu(pp->stats);
2926 irq_dispose_mapping(dev->irq); 3090 irq_dispose_mapping(dev->irq);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index b358c2f6f4bd..8f5aa7c62b18 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1488 dev->netdev_ops = &pxa168_eth_netdev_ops; 1488 dev->netdev_ops = &pxa168_eth_netdev_ops;
1489 dev->watchdog_timeo = 2 * HZ; 1489 dev->watchdog_timeo = 2 * HZ;
1490 dev->base_addr = 0; 1490 dev->base_addr = 0;
1491 SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops); 1491 dev->ethtool_ops = &pxa168_ethtool_ops;
1492 1492
1493 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); 1493 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
1494 1494
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b81106451a0a..69693384b58c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4760 4760
4761 SET_NETDEV_DEV(dev, &hw->pdev->dev); 4761 SET_NETDEV_DEV(dev, &hw->pdev->dev);
4762 dev->irq = hw->pdev->irq; 4762 dev->irq = hw->pdev->irq;
4763 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); 4763 dev->ethtool_ops = &sky2_ethtool_ops;
4764 dev->watchdog_timeo = TX_WATCHDOG; 4764 dev->watchdog_timeo = TX_WATCHDOG;
4765 dev->netdev_ops = &sky2_netdev_ops[port]; 4765 dev->netdev_ops = &sky2_netdev_ops[port];
4766 4766
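The pxa168 and sky2 hunks, like the mv643xx and mvneta ones above, are part of a tree-wide sweep removing SET_ETHTOOL_OPS(); in older kernels the macro was a bare assignment wrapper, so every call site becomes the assignment itself:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops example_ethtool_ops;

static void example_attach_ethtool(struct net_device *dev)
{
	/* SET_ETHTOOL_OPS(dev, &example_ethtool_ops) expanded to this: */
	dev->ethtool_ops = &example_ethtool_ops;
}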
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 29b616990e52..5d940a26055c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
212 212
213 /* First, verify that the master reports correct status */ 213 /* First, verify that the master reports correct status */
214 if (comm_pending(dev)) { 214 if (comm_pending(dev)) {
215 mlx4_warn(dev, "Communication channel is not idle." 215 mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
216 "my toggle is %d (cmd:0x%x)\n",
217 priv->cmd.comm_toggle, cmd); 216 priv->cmd.comm_toggle, cmd);
218 return -EAGAIN; 217 return -EAGAIN;
219 } 218 }
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
422 *out_param = 421 *out_param =
423 be64_to_cpu(vhcr->out_param); 422 be64_to_cpu(vhcr->out_param);
424 else { 423 else {
425 mlx4_err(dev, "response expected while" 424 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
426 "output mailbox is NULL for " 425 op);
427 "command 0x%x\n", op);
428 vhcr->status = CMD_STAT_BAD_PARAM; 426 vhcr->status = CMD_STAT_BAD_PARAM;
429 } 427 }
430 } 428 }
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
439 *out_param = 437 *out_param =
440 be64_to_cpu(vhcr->out_param); 438 be64_to_cpu(vhcr->out_param);
441 else { 439 else {
442 mlx4_err(dev, "response expected while" 440 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
443 "output mailbox is NULL for " 441 op);
444 "command 0x%x\n", op);
445 vhcr->status = CMD_STAT_BAD_PARAM; 442 vhcr->status = CMD_STAT_BAD_PARAM;
446 } 443 }
447 } 444 }
448 ret = mlx4_status_to_errno(vhcr->status); 445 ret = mlx4_status_to_errno(vhcr->status);
449 } else 446 } else
450 mlx4_err(dev, "failed execution of VHCR_POST command" 447 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
451 "opcode 0x%x\n", op); 448 op);
452 } 449 }
453 450
454 mutex_unlock(&priv->cmd.slave_cmd_mutex); 451 mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
476 goto out; 473 goto out;
477 } 474 }
478 475
476 if (out_is_imm && !out_param) {
477 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
478 op);
479 err = -EINVAL;
480 goto out;
481 }
482
479 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 483 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
480 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); 484 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
481 if (err) 485 if (err)
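Both mlx4_cmd_poll() and mlx4_cmd_wait() now validate an immediate-output request up front, instead of posting the command and dereferencing a NULL out_param when the result is written back. The guard reduces to:

#include <linux/errno.h>
#include <linux/types.h>

/* An "immediate output" command must have somewhere to put the result. */
static int ex_check_imm_out(bool out_is_imm, u64 *out_param)
{
	if (out_is_imm && !out_param)
		return -EINVAL;	/* reject before touching the HCR */
	return 0;
}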
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
554 cmd->free_head = context->next; 558 cmd->free_head = context->next;
555 spin_unlock(&cmd->context_lock); 559 spin_unlock(&cmd->context_lock);
556 560
561 if (out_is_imm && !out_param) {
562 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
563 op);
564 err = -EINVAL;
565 goto out;
566 }
567
557 init_completion(&context->done); 568 init_completion(&context->done);
558 569
559 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 570 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
625 636
626 if ((slave_addr & 0xfff) | (master_addr & 0xfff) | 637 if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
627 (slave & ~0x7f) | (size & 0xff)) { 638 (slave & ~0x7f) | (size & 0xff)) {
628 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " 639 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
629 "master_addr:0x%llx slave_id:%d size:%d\n", 640 slave_addr, master_addr, slave, size);
630 slave_addr, master_addr, slave, size);
631 return -EINVAL; 641 return -EINVAL;
632 } 642 }
633 643
@@ -1422,8 +1432,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1422 ALIGN(sizeof(struct mlx4_vhcr_cmd), 1432 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1423 MLX4_ACCESS_MEM_ALIGN), 1); 1433 MLX4_ACCESS_MEM_ALIGN), 1);
1424 if (ret) { 1434 if (ret) {
1425 mlx4_err(dev, "%s:Failed reading vhcr" 1435 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1426 "ret: 0x%x\n", __func__, ret); 1436 __func__, ret);
1427 kfree(vhcr); 1437 kfree(vhcr);
1428 return ret; 1438 return ret;
1429 } 1439 }
@@ -1474,9 +1484,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1474 1484
1475 /* Apply permission and bound checks if applicable */ 1485 /* Apply permission and bound checks if applicable */
1476 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { 1486 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1477 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection " 1487 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1478 "checks for resource_id:%d\n", vhcr->op, slave, 1488 vhcr->op, slave, vhcr->in_modifier);
1479 vhcr->in_modifier);
1480 vhcr_cmd->status = CMD_STAT_BAD_OP; 1489 vhcr_cmd->status = CMD_STAT_BAD_OP;
1481 goto out_status; 1490 goto out_status;
1482 } 1491 }
@@ -1515,8 +1524,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1515 } 1524 }
1516 1525
1517 if (err) { 1526 if (err) {
1518 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with" 1527 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1519 " error:%d, status %d\n",
1520 vhcr->op, slave, vhcr->errno, err); 1528 vhcr->op, slave, vhcr->errno, err);
1521 vhcr_cmd->status = mlx4_errno_to_status(err); 1529 vhcr_cmd->status = mlx4_errno_to_status(err);
1522 goto out_status; 1530 goto out_status;
@@ -1550,8 +1558,8 @@ out_status:
1550 __func__); 1558 __func__);
1551 else if (vhcr->e_bit && 1559 else if (vhcr->e_bit &&
1552 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) 1560 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1553 mlx4_warn(dev, "Failed to generate command completion " 1561 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1554 "eqe for slave %d\n", slave); 1562 slave);
1555 } 1563 }
1556 1564
1557out: 1565out:
@@ -1590,8 +1598,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1590 1598
1591 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", 1599 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1592 slave, port); 1600 slave, port);
1593 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, 1601 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1594 vp_admin->default_qos, vp_admin->link_state); 1602 vp_admin->default_vlan, vp_admin->default_qos,
1603 vp_admin->link_state);
1595 1604
1596 work = kzalloc(sizeof(*work), GFP_KERNEL); 1605 work = kzalloc(sizeof(*work), GFP_KERNEL);
1597 if (!work) 1606 if (!work)
@@ -1604,7 +1613,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1604 &admin_vlan_ix); 1613 &admin_vlan_ix);
1605 if (err) { 1614 if (err) {
1606 kfree(work); 1615 kfree(work);
1607 mlx4_warn((&priv->dev), 1616 mlx4_warn(&priv->dev,
1608 "No vlan resources slave %d, port %d\n", 1617 "No vlan resources slave %d, port %d\n",
1609 slave, port); 1618 slave, port);
1610 return err; 1619 return err;
@@ -1613,7 +1622,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1613 admin_vlan_ix = NO_INDX; 1622 admin_vlan_ix = NO_INDX;
1614 } 1623 }
1615 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; 1624 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1616 mlx4_dbg((&(priv->dev)), 1625 mlx4_dbg(&priv->dev,
1617 "alloc vlan %d idx %d slave %d port %d\n", 1626 "alloc vlan %d idx %d slave %d port %d\n",
1618 (int)(vp_admin->default_vlan), 1627 (int)(vp_admin->default_vlan),
1619 admin_vlan_ix, slave, port); 1628 admin_vlan_ix, slave, port);
@@ -1676,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1676 vp_admin->default_vlan, &(vp_oper->vlan_idx)); 1685 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1677 if (err) { 1686 if (err) {
1678 vp_oper->vlan_idx = NO_INDX; 1687 vp_oper->vlan_idx = NO_INDX;
1679 mlx4_warn((&priv->dev), 1688 mlx4_warn(&priv->dev,
1680 "No vlan resorces slave %d, port %d\n", 1689 "No vlan resorces slave %d, port %d\n",
1681 slave, port); 1690 slave, port);
1682 return err; 1691 return err;
1683 } 1692 }
1684 mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", 1693 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
1685 (int)(vp_oper->state.default_vlan), 1694 (int)(vp_oper->state.default_vlan),
1686 vp_oper->vlan_idx, slave, port); 1695 vp_oper->vlan_idx, slave, port);
1687 } 1696 }
@@ -1692,12 +1701,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1692 if (0 > vp_oper->mac_idx) { 1701 if (0 > vp_oper->mac_idx) {
1693 err = vp_oper->mac_idx; 1702 err = vp_oper->mac_idx;
1694 vp_oper->mac_idx = NO_INDX; 1703 vp_oper->mac_idx = NO_INDX;
1695 mlx4_warn((&priv->dev), 1704 mlx4_warn(&priv->dev,
1696 "No mac resorces slave %d, port %d\n", 1705 "No mac resorces slave %d, port %d\n",
1697 slave, port); 1706 slave, port);
1698 return err; 1707 return err;
1699 } 1708 }
1700 mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n", 1709 mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
1701 vp_oper->state.mac, vp_oper->mac_idx, slave, port); 1710 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1702 } 1711 }
1703 } 1712 }
@@ -1748,8 +1757,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1748 slave_state[slave].comm_toggle ^= 1; 1757 slave_state[slave].comm_toggle ^= 1;
1749 reply = (u32) slave_state[slave].comm_toggle << 31; 1758 reply = (u32) slave_state[slave].comm_toggle << 31;
1750 if (toggle != slave_state[slave].comm_toggle) { 1759 if (toggle != slave_state[slave].comm_toggle) {
1751 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER" 1760 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
1752 "STATE COMPROMISIED ***\n", toggle, slave); 1761 toggle, slave);
1753 goto reset_slave; 1762 goto reset_slave;
1754 } 1763 }
1755 if (cmd == MLX4_COMM_CMD_RESET) { 1764 if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1776,8 +1785,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1776 /*command from slave in the middle of FLR*/ 1785 /*command from slave in the middle of FLR*/
1777 if (cmd != MLX4_COMM_CMD_RESET && 1786 if (cmd != MLX4_COMM_CMD_RESET &&
1778 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { 1787 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1779 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) " 1788 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
1780 "in the middle of FLR\n", slave, cmd); 1789 slave, cmd);
1781 return; 1790 return;
1782 } 1791 }
1783 1792
@@ -1815,8 +1824,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1815 1824
1816 mutex_lock(&priv->cmd.slave_cmd_mutex); 1825 mutex_lock(&priv->cmd.slave_cmd_mutex);
1817 if (mlx4_master_process_vhcr(dev, slave, NULL)) { 1826 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1818 mlx4_err(dev, "Failed processing vhcr for slave:%d," 1827 mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
1819 " resetting slave.\n", slave); 1828 slave);
1820 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1829 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1821 goto reset_slave; 1830 goto reset_slave;
1822 } 1831 }
@@ -1833,8 +1842,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1833 is_going_down = 1; 1842 is_going_down = 1;
1834 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); 1843 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1835 if (is_going_down) { 1844 if (is_going_down) {
1836 mlx4_warn(dev, "Slave is going down aborting command(%d)" 1845 mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
1837 " executing from slave:%d\n",
1838 cmd, slave); 1846 cmd, slave);
1839 return; 1847 return;
1840 } 1848 }
@@ -1897,10 +1905,9 @@ void mlx4_master_comm_channel(struct work_struct *work)
1897 if (toggle != slt) { 1905 if (toggle != slt) {
1898 if (master->slave_state[slave].comm_toggle 1906 if (master->slave_state[slave].comm_toggle
1899 != slt) { 1907 != slt) {
1900 printk(KERN_INFO "slave %d out of sync." 1908 pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
1901 " read toggle %d, state toggle %d. " 1909 slave, slt,
1902 "Resynching.\n", slave, slt, 1910 master->slave_state[slave].comm_toggle);
1903 master->slave_state[slave].comm_toggle);
1904 master->slave_state[slave].comm_toggle = 1911 master->slave_state[slave].comm_toggle =
1905 slt; 1912 slt;
1906 } 1913 }
@@ -1913,8 +1920,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
1913 } 1920 }
1914 1921
1915 if (reported && reported != served) 1922 if (reported && reported != served)
1916 mlx4_warn(dev, "Got command event with bitmask from %d slaves" 1923 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
1917 " but %d were served\n",
1918 reported, served); 1924 reported, served);
1919 1925
1920 if (mlx4_ARM_COMM_CHANNEL(dev)) 1926 if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1970,7 +1976,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1970 ioremap(pci_resource_start(dev->pdev, 2) + 1976 ioremap(pci_resource_start(dev->pdev, 2) +
1971 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); 1977 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1972 if (!priv->mfunc.comm) { 1978 if (!priv->mfunc.comm) {
1973 mlx4_err(dev, "Couldn't map communication vector.\n"); 1979 mlx4_err(dev, "Couldn't map communication vector\n");
1974 goto err_vhcr; 1980 goto err_vhcr;
1975 } 1981 }
1976 1982
@@ -2097,7 +2103,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2097 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + 2103 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
2098 MLX4_HCR_BASE, MLX4_HCR_SIZE); 2104 MLX4_HCR_BASE, MLX4_HCR_SIZE);
2099 if (!priv->cmd.hcr) { 2105 if (!priv->cmd.hcr) {
2100 mlx4_err(dev, "Couldn't map command register.\n"); 2106 mlx4_err(dev, "Couldn't map command register\n");
2101 return -ENOMEM; 2107 return -ENOMEM;
2102 } 2108 }
2103 } 2109 }
@@ -2498,11 +2504,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
2498 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff); 2504 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2499 ivf->mac[5] = ((s_info->mac) & 0xff); 2505 ivf->mac[5] = ((s_info->mac) & 0xff);
2500 2506
2501 ivf->vlan = s_info->default_vlan; 2507 ivf->vlan = s_info->default_vlan;
2502 ivf->qos = s_info->default_qos; 2508 ivf->qos = s_info->default_qos;
2503 ivf->tx_rate = s_info->tx_rate; 2509 ivf->max_tx_rate = s_info->tx_rate;
2504 ivf->spoofchk = s_info->spoofchk; 2510 ivf->min_tx_rate = 0;
2505 ivf->linkstate = s_info->link_state; 2511 ivf->spoofchk = s_info->spoofchk;
2512 ivf->linkstate = s_info->link_state;
2506 2513
2507 return 0; 2514 return 0;
2508} 2515}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index c90cde5b4aee..80f725228f5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
293 atomic_set(&cq->refcount, 1); 293 atomic_set(&cq->refcount, 1);
294 init_completion(&cq->free); 294 init_completion(&cq->free);
295 295
296 cq->irq = priv->eq_table.eq[cq->vector].irq;
297 cq->irq_affinity_change = false;
298
296 return 0; 299 return 0;
297 300
298err_radix: 301err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d31bcad..4b2130760eed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
125 &cq->vector)) { 125 &cq->vector)) {
126 cq->vector = (cq->ring + 1 + priv->port) 126 cq->vector = (cq->ring + 1 + priv->port)
127 % mdev->dev->caps.num_comp_vectors; 127 % mdev->dev->caps.num_comp_vectors;
128 mlx4_warn(mdev, "Failed Assigning an EQ to " 128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
129 "%s ,Falling back to legacy EQ's\n",
130 name); 129 name);
131 } 130 }
132 } 131 }
@@ -164,6 +163,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
164 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, 163 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
165 NAPI_POLL_WEIGHT); 164 NAPI_POLL_WEIGHT);
166 } else { 165 } else {
166 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
167
168 err = irq_set_affinity_hint(cq->mcq.irq,
169 ring->affinity_mask);
170 if (err)
171 mlx4_warn(mdev, "Failed setting affinity hint\n");
172
167 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); 173 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
168 napi_hash_add(&cq->napi); 174 napi_hash_add(&cq->napi);
169 } 175 }
@@ -180,8 +186,11 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
180 186
181 mlx4_en_unmap_buffer(&cq->wqres.buf); 187 mlx4_en_unmap_buffer(&cq->wqres.buf);
182 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 188 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
183 if (priv->mdev->dev->caps.comp_pool && cq->vector) 189 if (priv->mdev->dev->caps.comp_pool && cq->vector) {
190 if (!cq->is_tx)
191 irq_set_affinity_hint(cq->mcq.irq, NULL);
184 mlx4_release_eq(priv->mdev->dev, cq->vector); 192 mlx4_release_eq(priv->mdev->dev, cq->vector);
193 }
185 cq->vector = 0; 194 cq->vector = 0;
186 cq->buf_size = 0; 195 cq->buf_size = 0;
187 cq->buf = NULL; 196 cq->buf = NULL;
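On the RX side each completion vector now carries an affinity hint built from the ring's cpumask, and the hint is cleared (set to NULL) before the EQ vector is released; irq_set_affinity_hint() can fail, which the driver treats as only worth a warning. The pairing, sketched:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Set a hint when the RX CQ is activated... */
static int ex_rx_cq_activate(unsigned int irq, struct cpumask *mask, int cpu)
{
	cpumask_set_cpu(cpu, mask);
	return irq_set_affinity_hint(irq, mask);	/* 0 or -errno */
}

/* ...and clear it before the vector is released. */
static void ex_rx_cq_destroy(unsigned int irq)
{
	irq_set_affinity_hint(irq, NULL);
}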
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d33605fe7..fa1a069e14e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -378,8 +378,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
378 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); 378 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
379 cmd->duplex = DUPLEX_FULL; 379 cmd->duplex = DUPLEX_FULL;
380 } else { 380 } else {
381 ethtool_cmd_speed_set(cmd, -1); 381 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
382 cmd->duplex = -1; 382 cmd->duplex = DUPLEX_UNKNOWN;
383 } 383 }
384 384
385 if (trans_type > 0 && trans_type <= 0xC) { 385 if (trans_type > 0 && trans_type <= 0xC) {
@@ -564,7 +564,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
564 return priv->rx_ring_num; 564 return priv->rx_ring_num;
565} 565}
566 566
567static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) 567static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
568{ 568{
569 struct mlx4_en_priv *priv = netdev_priv(dev); 569 struct mlx4_en_priv *priv = netdev_priv(dev);
570 struct mlx4_en_rss_map *rss_map = &priv->rss_map; 570 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -582,8 +582,8 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
582 return err; 582 return err;
583} 583}
584 584
585static int mlx4_en_set_rxfh_indir(struct net_device *dev, 585static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
586 const u32 *ring_index) 586 const u8 *key)
587{ 587{
588 struct mlx4_en_priv *priv = netdev_priv(dev); 588 struct mlx4_en_priv *priv = netdev_priv(dev);
589 struct mlx4_en_dev *mdev = priv->mdev; 589 struct mlx4_en_dev *mdev = priv->mdev;
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
925 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 925 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
926 } else { 926 } else {
927 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 927 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
928 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n", 928 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
929 cmd->fs.ring_cookie); 929 cmd->fs.ring_cookie);
930 return -EINVAL; 930 return -EINVAL;
931 } 931 }
932 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; 932 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
933 if (!qpn) { 933 if (!qpn) {
934 en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n", 934 en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
935 cmd->fs.ring_cookie); 935 cmd->fs.ring_cookie);
936 return -EINVAL; 936 return -EINVAL;
937 } 937 }
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
956 } 956 }
957 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); 957 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
958 if (err) { 958 if (err) {
959 en_err(priv, "Fail to attach network rule at location %d.\n", 959 en_err(priv, "Fail to attach network rule at location %d\n",
960 cmd->fs.location); 960 cmd->fs.location);
961 goto out_free_list; 961 goto out_free_list;
962 } 962 }
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
1121{ 1121{
1122 struct mlx4_en_priv *priv = netdev_priv(dev); 1122 struct mlx4_en_priv *priv = netdev_priv(dev);
1123 struct mlx4_en_dev *mdev = priv->mdev; 1123 struct mlx4_en_dev *mdev = priv->mdev;
1124 int port_up; 1124 int port_up = 0;
1125 int err = 0; 1125 int err = 0;
1126 1126
1127 if (channel->other_count || channel->combined_count || 1127 if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
1151 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 1151 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1152 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 1152 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1153 1153
1154 mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); 1154 if (dev->num_tc)
1155 mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
1155 1156
1156 en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num); 1157 en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
1157 en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num); 1158 en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
@@ -1223,8 +1224,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
1223 .get_rxnfc = mlx4_en_get_rxnfc, 1224 .get_rxnfc = mlx4_en_get_rxnfc,
1224 .set_rxnfc = mlx4_en_set_rxnfc, 1225 .set_rxnfc = mlx4_en_set_rxnfc,
1225 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, 1226 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
1226 .get_rxfh_indir = mlx4_en_get_rxfh_indir, 1227 .get_rxfh = mlx4_en_get_rxfh,
1227 .set_rxfh_indir = mlx4_en_set_rxfh_indir, 1228 .set_rxfh = mlx4_en_set_rxfh,
1228 .get_channels = mlx4_en_get_channels, 1229 .get_channels = mlx4_en_get_channels,
1229 .set_channels = mlx4_en_set_channels, 1230 .set_channels = mlx4_en_set_channels,
1230 .get_ts_info = mlx4_en_get_ts_info, 1231 .get_ts_info = mlx4_en_get_ts_info,
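The rename tracks an ethtool core change in this same merge window: the indirection-table-only callbacks were folded into .get_rxfh/.set_rxfh, which additionally carry an RSS hash key pointer. A driver without a configurable key, as here, can simply leave it alone; roughly:

#include <linux/errno.h>
#include <linux/netdevice.h>

static int ex_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
	/* fill indir[] when non-NULL; leave 'key' untouched if the
	 * device has no readable hash key
	 */
	return 0;
}

static int ex_set_rxfh(struct net_device *dev, const u32 *indir,
		       const u8 *key)
{
	/* apply indir[] when non-NULL; a keyless device ignores 'key' */
	return 0;
}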
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4fe7e3a..f953c1d7eae6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
133 MLX4_EN_MAX_TX_RING_P_UP); 133 MLX4_EN_MAX_TX_RING_P_UP);
134 if (params->udp_rss && !(mdev->dev->caps.flags 134 if (params->udp_rss && !(mdev->dev->caps.flags
135 & MLX4_DEV_CAP_FLAG_UDP_RSS)) { 135 & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
136 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); 136 mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
137 params->udp_rss = 0; 137 params->udp_rss = 0;
138 } 138 }
139 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 139 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
251 251
252 mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); 252 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
253 if (!mdev->LSO_support) 253 if (!mdev->LSO_support)
254 mlx4_warn(mdev, "LSO not supported, please upgrade to later " 254 mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
255 "FW version to enable LSO\n");
256 255
257 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, 256 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
258 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, 257 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
268 /* Build device profile according to supplied module parameters */ 267 /* Build device profile according to supplied module parameters */
269 err = mlx4_en_get_profile(mdev); 268 err = mlx4_en_get_profile(mdev);
270 if (err) { 269 if (err) {
271 mlx4_err(mdev, "Bad module parameters, aborting.\n"); 270 mlx4_err(mdev, "Bad module parameters, aborting\n");
272 goto err_mr; 271 goto err_mr;
273 } 272 }
274 273
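
Most of the mlx4 hunks in this series are the same mechanical cleanup: user-visible format strings are joined onto one source line (checkpatch exempts quoted strings from the 80-column limit precisely so that this is possible) and trailing periods are dropped for consistency. The payoff is greppability; a message spotted in dmesg can only be found with git grep if the literal is not split:

    /* grep-friendly: the whole message is one string literal */
    mlx4_err(mdev, "Bad module parameters, aborting\n");

    /* the old split form defeats "git grep 'parameters, aborting'":
     *   mlx4_err(mdev, "Bad module parameters, "
     *            "aborting.\n");
     */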
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b1720c3d1..7d4fb7bf2593 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
130 case IPPROTO_TCP: 130 case IPPROTO_TCP:
131 return MLX4_NET_TRANS_RULE_ID_TCP; 131 return MLX4_NET_TRANS_RULE_ID_TCP;
132 default: 132 default:
133 return -EPROTONOSUPPORT; 133 return MLX4_NET_TRANS_RULE_NUM;
134 } 134 }
135}; 135};
136 136
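
Returning -EPROTONOSUPPORT through a function whose return type is enum mlx4_net_trans_rule_id was fragile: the compiler may choose an unsigned type for the enum, in which case the caller's old "id < 0" test can be constant-false. The fix switches to an in-band sentinel, the enum's one-past-the-last value, and the caller below compares against MLX4_NET_TRANS_RULE_NUM instead. A self-contained illustration of the pattern (the names here are illustrative, not the mlx4 definitions):

    #include <stdio.h>

    enum rule_id { RULE_TCP, RULE_UDP, RULE_NUM /* sentinel, not a rule */ };

    static enum rule_id proto_to_rule(int proto)
    {
        switch (proto) {
        case 6:   return RULE_TCP;  /* IPPROTO_TCP */
        case 17:  return RULE_UDP;  /* IPPROTO_UDP */
        default:  return RULE_NUM;  /* unsupported: in-band sentinel */
        }
    }

    int main(void)
    {
        /* "id < 0" may be vacuous if the enum is unsigned; the
         * sentinel comparison is always well-defined */
        if (proto_to_rule(1) >= RULE_NUM)   /* ICMP: unsupported */
            printf("unsupported ip protocol\n");
        return 0;
    }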
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
177 int rc; 177 int rc;
178 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 178 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
179 179
180 if (spec_tcp_udp.id < 0) { 180 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
181 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 181 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
182 filter->ip_proto); 182 filter->ip_proto);
183 goto ignore; 183 goto ignore;
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
770 priv->dev->dev_addr, priv->prev_mac); 770 priv->dev->dev_addr, priv->prev_mac);
771 if (err) 771 if (err)
772 en_err(priv, "Failed changing HW MAC address\n"); 772 en_err(priv, "Failed changing HW MAC address\n");
773 memcpy(priv->prev_mac, priv->dev->dev_addr,
774 sizeof(priv->prev_mac));
775 } else 773 } else
776 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 774 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
777 775
776 memcpy(priv->prev_mac, priv->dev->dev_addr,
777 sizeof(priv->prev_mac));
778
778 return err; 779 return err;
779} 780}
780 781
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
788 if (!is_valid_ether_addr(saddr->sa_data)) 789 if (!is_valid_ether_addr(saddr->sa_data))
789 return -EADDRNOTAVAIL; 790 return -EADDRNOTAVAIL;
790 791
791 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
792
793 mutex_lock(&mdev->state_lock); 792 mutex_lock(&mdev->state_lock);
793 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
794 err = mlx4_en_do_set_mac(priv); 794 err = mlx4_en_do_set_mac(priv);
795 mutex_unlock(&mdev->state_lock); 795 mutex_unlock(&mdev->state_lock);
796 796
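
Two related consistency fixes in the MAC-change path. First, mlx4_en_do_set_mac() now refreshes prev_mac unconditionally, so the "previous" address stays in sync with dev_addr even when the port is down and nothing was registered with the hardware. Second, in mlx4_en_set_mac() the copy into dev->dev_addr moves inside the state_lock, so no other state_lock holder can observe the new dev_addr paired with a stale prev_mac. The invariant being enforced, sketched with a hypothetical helper standing in for the hardware call:

    /* assumed invariant: dev->dev_addr and priv->prev_mac are only
     * touched under mdev->state_lock; illustrative shape only */
    static int do_set_mac_sketch(struct mlx4_en_priv *priv)
    {
        int err = 0;

        if (priv->port_up)
            err = replace_hw_mac(priv);  /* hypothetical: re-register MAC */

        /* track the current address even when the port is down, so a
         * later port start replaces the right "previous" MAC */
        memcpy(priv->prev_mac, priv->dev->dev_addr,
               sizeof(priv->prev_mac));
        return err;
    }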
@@ -1526,6 +1526,27 @@ static void mlx4_en_linkstate(struct work_struct *work)
1526 mutex_unlock(&mdev->state_lock); 1526 mutex_unlock(&mdev->state_lock);
1527} 1527}
1528 1528
1529static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1530{
1531 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1532 int numa_node = priv->mdev->dev->numa_node;
1533 int ret = 0;
1534
1535 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1536 return -ENOMEM;
1537
1538 ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
1539 ring->affinity_mask);
1540 if (ret)
1541 free_cpumask_var(ring->affinity_mask);
1542
1543 return ret;
1544}
1545
1546static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1547{
1548 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1549}
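
These helpers give each RX ring a NUMA-aware affinity mask: cpumask_set_cpu_local_first(ring_idx, numa_node, mask) selects the ring_idx-th online CPU counting CPUs on the device's own NUMA node first, so ring interrupts spread across local cores before spilling onto remote nodes. How the mask reaches the IRQ layer is not visible in this hunk; a conventional way to consume it would be the stock affinity-hint API (an assumption about the plumbing, not a claim about mlx4's exact path):

    int irq = ring_irq(priv, i);   /* hypothetical: the ring's vector */

    err = mlx4_en_init_affinity_hint(priv, i);
    if (!err)
        irq_set_affinity_hint(irq, priv->rx_ring[i]->affinity_mask);

    /* on teardown, clear the hint before freeing the mask */
    irq_set_affinity_hint(irq, NULL);
    mlx4_en_free_affinity_hint(priv, i);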
1529 1550
1530int mlx4_en_start_port(struct net_device *dev) 1551int mlx4_en_start_port(struct net_device *dev)
1531{ 1552{
@@ -1567,17 +1588,25 @@ int mlx4_en_start_port(struct net_device *dev)
1567 1588
1568 mlx4_en_cq_init_lock(cq); 1589 mlx4_en_cq_init_lock(cq);
1569 1590
1591 err = mlx4_en_init_affinity_hint(priv, i);
1592 if (err) {
1593 en_err(priv, "Failed preparing IRQ affinity hint\n");
1594 goto cq_err;
1595 }
1596
1570 err = mlx4_en_activate_cq(priv, cq, i); 1597 err = mlx4_en_activate_cq(priv, cq, i);
1571 if (err) { 1598 if (err) {
1572 en_err(priv, "Failed activating Rx CQ\n"); 1599 en_err(priv, "Failed activating Rx CQ\n");
1600 mlx4_en_free_affinity_hint(priv, i);
1573 goto cq_err; 1601 goto cq_err;
1574 } 1602 }
1575 for (j = 0; j < cq->size; j++) 1603 for (j = 0; j < cq->size; j++)
1576 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1604 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1577 err = mlx4_en_set_cq_moder(priv, cq); 1605 err = mlx4_en_set_cq_moder(priv, cq);
1578 if (err) { 1606 if (err) {
1579 en_err(priv, "Failed setting cq moderation parameters"); 1607 en_err(priv, "Failed setting cq moderation parameters\n");
1580 mlx4_en_deactivate_cq(priv, cq); 1608 mlx4_en_deactivate_cq(priv, cq);
1609 mlx4_en_free_affinity_hint(priv, i);
1581 goto cq_err; 1610 goto cq_err;
1582 } 1611 }
1583 mlx4_en_arm_cq(priv, cq); 1612 mlx4_en_arm_cq(priv, cq);
@@ -1615,7 +1644,7 @@ int mlx4_en_start_port(struct net_device *dev)
1615 } 1644 }
1616 err = mlx4_en_set_cq_moder(priv, cq); 1645 err = mlx4_en_set_cq_moder(priv, cq);
1617 if (err) { 1646 if (err) {
1618 en_err(priv, "Failed setting cq moderation parameters"); 1647 en_err(priv, "Failed setting cq moderation parameters\n");
1619 mlx4_en_deactivate_cq(priv, cq); 1648 mlx4_en_deactivate_cq(priv, cq);
1620 goto tx_err; 1649 goto tx_err;
1621 } 1650 }
@@ -1715,8 +1744,10 @@ rss_err:
1715mac_err: 1744mac_err:
1716 mlx4_en_put_qp(priv); 1745 mlx4_en_put_qp(priv);
1717cq_err: 1746cq_err:
1718 while (rx_index--) 1747 while (rx_index--) {
1719 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1748 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1749 mlx4_en_free_affinity_hint(priv, i);
1750 }
1720 for (i = 0; i < priv->rx_ring_num; i++) 1751 for (i = 0; i < priv->rx_ring_num; i++)
1721 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1752 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1722 1753
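
One oddity in the unwind path above: inside "while (rx_index--)" the hint is released with the outer loop variable i rather than rx_index, so the same ring's mask appears to be freed on every iteration (a double free on CONFIG_CPUMASK_OFFSTACK builds) while the masks of the other already-initialized rings leak. If per-ring cleanup is the intent, the loop index is what one would expect (an observation about this hunk, not a statement of the author's intent):

    while (rx_index--) {
        mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
        mlx4_en_free_affinity_hint(priv, rx_index);  /* per-ring index */
    }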
@@ -1847,6 +1878,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
1847 msleep(1); 1878 msleep(1);
1848 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1879 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1849 mlx4_en_deactivate_cq(priv, cq); 1880 mlx4_en_deactivate_cq(priv, cq);
1881
1882 mlx4_en_free_affinity_hint(priv, i);
1850 } 1883 }
1851} 1884}
1852 1885
@@ -2539,7 +2572,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2539 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 2572 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2540 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 2573 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2541 2574
2542 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 2575 dev->ethtool_ops = &mlx4_en_ethtool_ops;
2543 2576
2544 /* 2577 /*
2545 * Set driver features 2578 * Set driver features
@@ -2594,8 +2627,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2594 prof->tx_pause, prof->tx_ppp, 2627 prof->tx_pause, prof->tx_ppp,
2595 prof->rx_pause, prof->rx_ppp); 2628 prof->rx_pause, prof->rx_ppp);
2596 if (err) { 2629 if (err) {
2597 en_err(priv, "Failed setting port general configurations " 2630 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
2598 "for port %d, with error %d\n", priv->port, err); 2631 priv->port, err);
2599 goto out; 2632 goto out;
2600 } 2633 }
2601 2634
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 87857a6463eb..d2d415732d99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
270 ring->actual_size, 270 ring->actual_size,
271 GFP_KERNEL)) { 271 GFP_KERNEL)) {
272 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { 272 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
273 en_err(priv, "Failed to allocate " 273 en_err(priv, "Failed to allocate enough rx buffers\n");
274 "enough rx buffers\n");
275 return -ENOMEM; 274 return -ENOMEM;
276 } else { 275 } else {
277 new_size = rounddown_pow_of_two(ring->actual_size); 276 new_size = rounddown_pow_of_two(ring->actual_size);
278 en_warn(priv, "Only %d buffers allocated " 277 en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
279 "reducing ring size to %d",
280 ring->actual_size, new_size); 278 ring->actual_size, new_size);
281 goto reduce_rings; 279 goto reduce_rings;
282 } 280 }
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
685 /* Drop packet on bad receive or bad checksum */ 683 /* Drop packet on bad receive or bad checksum */
686 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == 684 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
687 MLX4_CQE_OPCODE_ERROR)) { 685 MLX4_CQE_OPCODE_ERROR)) {
688 en_err(priv, "CQE completed in error - vendor " 686 en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
689 "syndrom:%d syndrom:%d\n", 687 ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
690 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, 688 ((struct mlx4_err_cqe *)cqe)->syndrome);
691 ((struct mlx4_err_cqe *) cqe)->syndrome);
692 goto next; 689 goto next;
693 } 690 }
694 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { 691 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
898 mlx4_en_cq_unlock_napi(cq); 895 mlx4_en_cq_unlock_napi(cq);
899 896
900 /* If we used up all the quota - we're probably not done yet... */ 897 /* If we used up all the quota - we're probably not done yet... */
901 if (done == budget) 898 if (done == budget) {
902 INC_PERF_COUNTER(priv->pstats.napi_quota); 899 INC_PERF_COUNTER(priv->pstats.napi_quota);
903 else { 900 if (unlikely(cq->mcq.irq_affinity_change)) {
901 cq->mcq.irq_affinity_change = false;
902 napi_complete(napi);
903 mlx4_en_arm_cq(priv, cq);
904 return 0;
905 }
906 } else {
904 /* Done for now */ 907 /* Done for now */
908 cq->mcq.irq_affinity_change = false;
905 napi_complete(napi); 909 napi_complete(napi);
906 mlx4_en_arm_cq(priv, cq); 910 mlx4_en_arm_cq(priv, cq);
907 } 911 }
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
944 priv->rx_skb_size = eff_mtu; 948 priv->rx_skb_size = eff_mtu;
945 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); 949 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
946 950
947 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " 951 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
948 "num_frags:%d):\n", eff_mtu, priv->num_frags); 952 eff_mtu, priv->num_frags);
949 for (i = 0; i < priv->num_frags; i++) { 953 for (i = 0; i < priv->num_frags; i++) {
950 en_err(priv, 954 en_err(priv,
951 " frag:%d - size:%d prefix:%d align:%d stride:%d\n", 955 " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index bc0cc1eb214d..8be7483f8236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
108 108
109 ring->buf = ring->wqres.buf.direct.buf; 109 ring->buf = ring->wqres.buf.direct.buf;
110 110
111 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " 111 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
112 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 112 ring, ring->buf, ring->size, ring->buf_size,
113 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 113 (unsigned long long) ring->wqres.buf.direct.map);
114 114
115 ring->qpn = qpn; 115 ring->qpn = qpn;
116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); 116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
122 122
123 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); 123 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
124 if (err) { 124 if (err) {
125 en_dbg(DRV, priv, "working without blueflame (%d)", err); 125 en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
126 ring->bf.uar = &mdev->priv_uar; 126 ring->bf.uar = &mdev->priv_uar;
127 ring->bf.uar->map = mdev->uar_map; 127 ring->bf.uar->map = mdev->uar_map;
128 ring->bf_enabled = false; 128 ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
474 /* If we used up all the quota - we're probably not done yet... */ 474 /* If we used up all the quota - we're probably not done yet... */
475 if (done < budget) { 475 if (done < budget) {
476 /* Done for now */ 476 /* Done for now */
477 cq->mcq.irq_affinity_change = false;
477 napi_complete(napi); 478 napi_complete(napi);
478 mlx4_en_arm_cq(priv, cq); 479 mlx4_en_arm_cq(priv, cq);
479 return done; 480 return done;
481 } else if (unlikely(cq->mcq.irq_affinity_change)) {
482 cq->mcq.irq_affinity_change = false;
483 napi_complete(napi);
484 mlx4_en_arm_cq(priv, cq);
485 return 0;
480 } 486 }
481 return budget; 487 return budget;
482} 488}
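
The RX and TX poll routines now share one rule: if the EQ layer has flagged an affinity change for this CQ's interrupt, complete NAPI and re-arm the CQ even when the whole budget was consumed, and return 0 so the NAPI core stops polling. Staying in the polling loop would keep the softirq pinned to the old CPU and leave the requested migration pending indefinitely. The common shape, reduced to a sketch (the helpers are hypothetical):

    static int poll_sketch(struct napi_struct *napi, int budget)
    {
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        int done = process_cq(cq, budget);      /* hypothetical helper */

        if (done < budget || cq->mcq.irq_affinity_change) {
            cq->mcq.irq_affinity_change = false;
            napi_complete(napi);                /* leave polled mode */
            arm_cq(cq);                         /* hypothetical: re-enable IRQ */
            return done < budget ? done : 0;    /* 0: yield despite full budget */
        }
        return budget;                          /* keep polling */
    }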
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b0fb79..d954ec1eac17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,6 +53,11 @@ enum {
53 MLX4_EQ_ENTRY_SIZE = 0x20 53 MLX4_EQ_ENTRY_SIZE = 0x20
54}; 54};
55 55
56struct mlx4_irq_notify {
57 void *arg;
58 struct irq_affinity_notify notify;
59};
60
56#define MLX4_EQ_STATUS_OK ( 0 << 28) 61#define MLX4_EQ_STATUS_OK ( 0 << 28)
57#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) 62#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
58#define MLX4_EQ_OWNER_SW ( 0 << 24) 63#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
152 if (i != dev->caps.function && 157 if (i != dev->caps.function &&
153 master->slave_state[i].active) 158 master->slave_state[i].active)
154 if (mlx4_GEN_EQE(dev, i, eqe)) 159 if (mlx4_GEN_EQE(dev, i, eqe))
155 mlx4_warn(dev, "Failed to " 160 mlx4_warn(dev, "Failed to generate event for slave %d\n",
156 " generate event " 161 i);
157 "for slave %d\n", i);
158 } 162 }
159 } else { 163 } else {
160 if (mlx4_GEN_EQE(dev, slave, eqe)) 164 if (mlx4_GEN_EQE(dev, slave, eqe))
161 mlx4_warn(dev, "Failed to generate event " 165 mlx4_warn(dev, "Failed to generate event for slave %d\n",
162 "for slave %d\n", slave); 166 slave);
163 } 167 }
164 ++slave_eq->cons; 168 ++slave_eq->cons;
165 } 169 }
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
177 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; 181 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
178 if ((!!(s_eqe->owner & 0x80)) ^ 182 if ((!!(s_eqe->owner & 0x80)) ^
179 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { 183 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
180 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. " 184 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
181 "No free EQE on slave events queue\n", slave); 185 slave);
182 spin_unlock_irqrestore(&slave_eq->event_lock, flags); 186 spin_unlock_irqrestore(&slave_eq->event_lock, flags);
183 return; 187 return;
184 } 188 }
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
375 } 379 }
376 break; 380 break;
377 default: 381 default:
378 pr_err("%s: BUG!!! UNKNOWN state: " 382 pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
379 "slave:%d, port:%d\n", __func__, slave, port); 383 __func__, slave, port);
380 goto out; 384 goto out;
381 } 385 }
382 ret = mlx4_get_slave_port_state(dev, slave, port); 386 ret = mlx4_get_slave_port_state(dev, slave, port);
383 387
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
425 for (i = 0 ; i < dev->num_slaves; i++) { 429 for (i = 0 ; i < dev->num_slaves; i++) {
426 430
427 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { 431 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
428 mlx4_dbg(dev, "mlx4_handle_slave_flr: " 432 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
429 "clean slave: %d\n", i); 433 i);
430 434
431 mlx4_delete_all_resources_for_slave(dev, i); 435 mlx4_delete_all_resources_for_slave(dev, i);
432 /*return the slave to running mode*/ 436 /*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
438 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, 442 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
439 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 443 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
440 if (err) 444 if (err)
441 mlx4_warn(dev, "Failed to notify FW on " 445 mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
442 "FLR done (slave:%d)\n", i); 446 i);
443 } 447 }
444 } 448 }
445} 449}
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
490 be32_to_cpu(eqe->event.qp.qpn) 494 be32_to_cpu(eqe->event.qp.qpn)
491 & 0xffffff, &slave); 495 & 0xffffff, &slave);
492 if (ret && ret != -ENOENT) { 496 if (ret && ret != -ENOENT) {
493 mlx4_dbg(dev, "QP event %02x(%02x) on " 497 mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
494 "EQ %d at index %u: could "
495 "not get slave id (%d)\n",
496 eqe->type, eqe->subtype, 498 eqe->type, eqe->subtype,
497 eq->eqn, eq->cons_index, ret); 499 eq->eqn, eq->cons_index, ret);
498 break; 500 break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
520 & 0xffffff, 522 & 0xffffff,
521 &slave); 523 &slave);
522 if (ret && ret != -ENOENT) { 524 if (ret && ret != -ENOENT) {
523 mlx4_warn(dev, "SRQ event %02x(%02x) " 525 mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
524 "on EQ %d at index %u: could"
525 " not get slave id (%d)\n",
526 eqe->type, eqe->subtype, 526 eqe->type, eqe->subtype,
527 eq->eqn, eq->cons_index, ret); 527 eq->eqn, eq->cons_index, ret);
528 break; 528 break;
529 } 529 }
530 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x," 530 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
531 " event: %02x(%02x)\n", __func__, 531 __func__, slave,
532 slave,
533 be32_to_cpu(eqe->event.srq.srqn), 532 be32_to_cpu(eqe->event.srq.srqn),
534 eqe->type, eqe->subtype); 533 eqe->type, eqe->subtype);
535 534
536 if (!ret && slave != dev->caps.function) { 535 if (!ret && slave != dev->caps.function) {
537 mlx4_warn(dev, "%s: sending event " 536 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
538 "%02x(%02x) to slave:%d\n", 537 __func__, eqe->type,
539 __func__, eqe->type,
540 eqe->subtype, slave); 538 eqe->subtype, slave);
541 mlx4_slave_event(dev, slave, eqe); 539 mlx4_slave_event(dev, slave, eqe);
542 break; 540 break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
569 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 567 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
570 if (i == mlx4_master_func_num(dev)) 568 if (i == mlx4_master_func_num(dev))
571 continue; 569 continue;
572 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" 570 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
573 " to slave: %d, port:%d\n",
574 __func__, i, port); 571 __func__, i, port);
575 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 572 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
576 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 573 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
634 be32_to_cpu(eqe->event.cq_err.cqn) 631 be32_to_cpu(eqe->event.cq_err.cqn)
635 & 0xffffff, &slave); 632 & 0xffffff, &slave);
636 if (ret && ret != -ENOENT) { 633 if (ret && ret != -ENOENT) {
637 mlx4_dbg(dev, "CQ event %02x(%02x) on " 634 mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
638 "EQ %d at index %u: could " 635 eqe->type, eqe->subtype,
639 "not get slave id (%d)\n", 636 eq->eqn, eq->cons_index, ret);
640 eqe->type, eqe->subtype,
641 eq->eqn, eq->cons_index, ret);
642 break; 637 break;
643 } 638 }
644 639
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
667 662
668 case MLX4_EVENT_TYPE_COMM_CHANNEL: 663 case MLX4_EVENT_TYPE_COMM_CHANNEL:
669 if (!mlx4_is_master(dev)) { 664 if (!mlx4_is_master(dev)) {
670 mlx4_warn(dev, "Received comm channel event " 665 mlx4_warn(dev, "Received comm channel event for non master device\n");
671 "for non master device\n");
672 break; 666 break;
673 } 667 }
674 memcpy(&priv->mfunc.master.comm_arm_bit_vector, 668 memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
681 case MLX4_EVENT_TYPE_FLR_EVENT: 675 case MLX4_EVENT_TYPE_FLR_EVENT:
682 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); 676 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
683 if (!mlx4_is_master(dev)) { 677 if (!mlx4_is_master(dev)) {
684 mlx4_warn(dev, "Non-master function received" 678 mlx4_warn(dev, "Non-master function received FLR event\n");
685 "FLR event\n");
686 break; 679 break;
687 } 680 }
688 681
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
711 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { 704 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
712 if (mlx4_is_master(dev)) 705 if (mlx4_is_master(dev))
713 for (i = 0; i < dev->num_slaves; i++) { 706 for (i = 0; i < dev->num_slaves; i++) {
714 mlx4_dbg(dev, "%s: Sending " 707 mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
715 "MLX4_FATAL_WARNING_SUBTYPE_WARMING" 708 __func__, i);
716 " to slave: %d\n", __func__, i);
717 if (i == dev->caps.function) 709 if (i == dev->caps.function)
718 continue; 710 continue;
719 mlx4_slave_event(dev, i, eqe); 711 mlx4_slave_event(dev, i, eqe);
720 } 712 }
721 mlx4_err(dev, "Temperature Threshold was reached! " 713 mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
722 "Threshold: %d celsius degrees; " 714 be16_to_cpu(eqe->event.warming.warning_threshold),
723 "Current Temperature: %d\n", 715 be16_to_cpu(eqe->event.warming.current_temperature));
724 be16_to_cpu(eqe->event.warming.warning_threshold),
725 be16_to_cpu(eqe->event.warming.current_temperature));
726 } else 716 } else
727 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " 717 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
728 "subtype %02x on EQ %d at index %u. owner=%x, "
729 "nent=0x%x, slave=%x, ownership=%s\n",
730 eqe->type, eqe->subtype, eq->eqn, 718 eqe->type, eqe->subtype, eq->eqn,
731 eq->cons_index, eqe->owner, eq->nent, 719 eq->cons_index, eqe->owner, eq->nent,
732 eqe->slave_id, 720 eqe->slave_id,
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
743 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 731 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
744 case MLX4_EVENT_TYPE_ECC_DETECT: 732 case MLX4_EVENT_TYPE_ECC_DETECT:
745 default: 733 default:
746 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " 734 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
747 "index %u. owner=%x, nent=0x%x, slave=%x, "
748 "ownership=%s\n",
749 eqe->type, eqe->subtype, eq->eqn, 735 eqe->type, eqe->subtype, eq->eqn,
750 eq->cons_index, eqe->owner, eq->nent, 736 eq->cons_index, eqe->owner, eq->nent,
751 eqe->slave_id, 737 eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
1088 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + 1074 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
1089 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); 1075 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
1090 if (!priv->clr_base) { 1076 if (!priv->clr_base) {
1091 mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); 1077 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
1092 return -ENOMEM; 1078 return -ENOMEM;
1093 } 1079 }
1094 1080
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
1102 iounmap(priv->clr_base); 1088 iounmap(priv->clr_base);
1103} 1089}
1104 1090
1091static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
1092 const cpumask_t *mask)
1093{
1094 struct mlx4_irq_notify *n = container_of(notify,
1095 struct mlx4_irq_notify,
1096 notify);
1097 struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
1098 struct radix_tree_iter iter;
1099 void **slot;
1100
1101 radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
1102 struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
1103
1104 if (cq->irq == notify->irq)
1105 cq->irq_affinity_change = true;
1106 }
1107}
1108
1109static void mlx4_release_irq_notifier(struct kref *ref)
1110{
1111 struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
1112 notify.kref);
1113 kfree(n);
1114}
1115
1116static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
1117 struct mlx4_dev *dev, int irq)
1118{
1119 struct mlx4_irq_notify *irq_notifier = NULL;
1120 int err = 0;
1121
1122 irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
1123 if (!irq_notifier) {
1124 mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
1125 irq);
1126 return;
1127 }
1128
1129 irq_notifier->notify.irq = irq;
1130 irq_notifier->notify.notify = mlx4_irq_notifier_notify;
1131 irq_notifier->notify.release = mlx4_release_irq_notifier;
1132 irq_notifier->arg = priv;
1133 err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
1134 if (err) {
1135 kfree(irq_notifier);
1136 irq_notifier = NULL;
1137 mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
1138 }
1139}
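
This block wires mlx4 into the generic IRQ affinity-notification machinery from <linux/interrupt.h>: irq_set_affinity_notifier() registers a struct irq_affinity_notify whose .notify callback runs from a workqueue (process context) whenever the IRQ's affinity is rewritten, and whose .release is a kref destructor invoked once the notifier is unhooked. mlx4's callback walks the CQ radix tree and flags every CQ bound to that IRQ so its next NAPI poll yields (see the en_rx.c and en_tx.c hunks above); the mlx4_release_eq hunk below unhooks the notifier by passing NULL before free_irq(), which is what finally drops the kref. The registration pattern in miniature:

    struct my_notify {
        void *arg;
        struct irq_affinity_notify notify;
    };

    static void my_notify_cb(struct irq_affinity_notify *notify,
                             const cpumask_t *mask)
    {
        struct my_notify *n = container_of(notify, struct my_notify, notify);
        /* process context: safe to take sleeping locks while
         * flagging per-IRQ driver state through n->arg */
    }

    static void my_release(struct kref *ref)
    {
        kfree(container_of(ref, struct my_notify, notify.kref));
    }

    static int my_register(struct my_notify *n, unsigned int irq)
    {
        n->notify.irq = irq;
        n->notify.notify = my_notify_cb;
        n->notify.release = my_release;
        /* unhook later with irq_set_affinity_notifier(irq, NULL),
         * which drops the kref and ends in my_release() */
        return irq_set_affinity_notifier(irq, &n->notify);
    }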
1140
1141
1105int mlx4_alloc_eq_table(struct mlx4_dev *dev) 1142int mlx4_alloc_eq_table(struct mlx4_dev *dev)
1106{ 1143{
1107 struct mlx4_priv *priv = mlx4_priv(dev); 1144 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1372 continue; 1409 continue;
1373 /*we dont want to break here*/ 1410 /*we dont want to break here*/
1374 } 1411 }
1412 mlx4_assign_irq_notifier(priv, dev,
1413 priv->eq_table.eq[vec].irq);
1414
1375 eq_set_ci(&priv->eq_table.eq[vec], 1); 1415 eq_set_ci(&priv->eq_table.eq[vec], 1);
1376 } 1416 }
1377 } 1417 }
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1398 Belonging to a legacy EQ*/ 1438 Belonging to a legacy EQ*/
1399 mutex_lock(&priv->msix_ctl.pool_lock); 1439 mutex_lock(&priv->msix_ctl.pool_lock);
1400 if (priv->msix_ctl.pool_bm & 1ULL << i) { 1440 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1441 irq_set_affinity_notifier(
1442 priv->eq_table.eq[vec].irq,
1443 NULL);
1401 free_irq(priv->eq_table.eq[vec].irq, 1444 free_irq(priv->eq_table.eq[vec].irq,
1402 &priv->eq_table.eq[vec]); 1445 &priv->eq_table.eq[vec]);
1403 priv->msix_ctl.pool_bm &= ~(1ULL << i); 1446 priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 01e6dd61ee3c..688e1eabab29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -437,8 +437,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
437 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { 437 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
438 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); 438 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
439 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { 439 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
440 mlx4_err(dev, "phy_wqe_gid is " 440 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
441 "enforced on this ib port\n");
442 err = -EPROTONOSUPPORT; 441 err = -EPROTONOSUPPORT;
443 goto out; 442 goto out;
444 } 443 }
@@ -1070,10 +1069,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1070 */ 1069 */
1071 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1070 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
1072 if (lg < MLX4_ICM_PAGE_SHIFT) { 1071 if (lg < MLX4_ICM_PAGE_SHIFT) {
1073 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", 1072 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1074 MLX4_ICM_PAGE_SIZE, 1073 MLX4_ICM_PAGE_SIZE,
1075 (unsigned long long) mlx4_icm_addr(&iter), 1074 (unsigned long long) mlx4_icm_addr(&iter),
1076 mlx4_icm_size(&iter)); 1075 mlx4_icm_size(&iter));
1077 err = -EINVAL; 1076 err = -EINVAL;
1078 goto out; 1077 goto out;
1079 } 1078 }
@@ -1109,14 +1108,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1109 1108
1110 switch (op) { 1109 switch (op) {
1111 case MLX4_CMD_MAP_FA: 1110 case MLX4_CMD_MAP_FA:
1112 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); 1111 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1113 break; 1112 break;
1114 case MLX4_CMD_MAP_ICM_AUX: 1113 case MLX4_CMD_MAP_ICM_AUX:
1115 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); 1114 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1116 break; 1115 break;
1117 case MLX4_CMD_MAP_ICM: 1116 case MLX4_CMD_MAP_ICM:
1118 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", 1117 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1119 tc, ts, (unsigned long long) virt - (ts << 10)); 1118 tc, ts, (unsigned long long) virt - (ts << 10));
1120 break; 1119 break;
1121 } 1120 }
1122 1121
@@ -1202,14 +1201,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
1202 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1201 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1203 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1202 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1204 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1203 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1205 mlx4_err(dev, "Installed FW has unsupported " 1204 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1206 "command interface revision %d.\n",
1207 cmd_if_rev); 1205 cmd_if_rev);
1208 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1206 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1209 (int) (dev->caps.fw_ver >> 32), 1207 (int) (dev->caps.fw_ver >> 32),
1210 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1208 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1211 (int) dev->caps.fw_ver & 0xffff); 1209 (int) dev->caps.fw_ver & 0xffff);
1212 mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", 1210 mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1213 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1211 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1214 err = -ENODEV; 1212 err = -ENODEV;
1215 goto out; 1213 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 26169b3eaed8..5f42f6d6e4c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
104MODULE_PARM_DESC(enable_64b_cqe_eqe, 104MODULE_PARM_DESC(enable_64b_cqe_eqe,
105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
106 106
107#define HCA_GLOBAL_CAP_MASK 0
108
109#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE 107#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
110 108
111static char mlx4_version[] = 109static char mlx4_version[] =
@@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
134 132
135static bool use_prio; 133static bool use_prio;
136module_param_named(use_prio, use_prio, bool, 0444); 134module_param_named(use_prio, use_prio, bool, 0444);
137MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 135MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
138 "(0/1, default 0)");
139 136
140int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 137int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
141module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 138module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
@@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
163 for (i = 0; i < dev->caps.num_ports - 1; i++) { 160 for (i = 0; i < dev->caps.num_ports - 1; i++) {
164 if (port_type[i] != port_type[i + 1]) { 161 if (port_type[i] != port_type[i + 1]) {
165 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 162 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
166 mlx4_err(dev, "Only same port types supported " 163 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
167 "on this HCA, aborting.\n");
168 return -EINVAL; 164 return -EINVAL;
169 } 165 }
170 } 166 }
@@ -172,8 +168,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
172 168
173 for (i = 0; i < dev->caps.num_ports; i++) { 169 for (i = 0; i < dev->caps.num_ports; i++) {
174 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 170 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
175 mlx4_err(dev, "Requested port type for port %d is not " 171 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
176 "supported on this HCA\n", i + 1); 172 i + 1);
177 return -EINVAL; 173 return -EINVAL;
178 } 174 }
179 } 175 }
@@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
195 191
196 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 192 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
197 if (err) { 193 if (err) {
198 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 194 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
199 return err; 195 return err;
200 } 196 }
201 197
202 if (dev_cap->min_page_sz > PAGE_SIZE) { 198 if (dev_cap->min_page_sz > PAGE_SIZE) {
203 mlx4_err(dev, "HCA minimum page size of %d bigger than " 199 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
204 "kernel PAGE_SIZE of %ld, aborting.\n",
205 dev_cap->min_page_sz, PAGE_SIZE); 200 dev_cap->min_page_sz, PAGE_SIZE);
206 return -ENODEV; 201 return -ENODEV;
207 } 202 }
208 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 203 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
209 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 204 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
210 "aborting.\n",
211 dev_cap->num_ports, MLX4_MAX_PORTS); 205 dev_cap->num_ports, MLX4_MAX_PORTS);
212 return -ENODEV; 206 return -ENODEV;
213 } 207 }
214 208
215 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 209 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
216 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " 210 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
217 "PCI resource 2 size of 0x%llx, aborting.\n",
218 dev_cap->uar_size, 211 dev_cap->uar_size,
219 (unsigned long long) pci_resource_len(dev->pdev, 2)); 212 (unsigned long long) pci_resource_len(dev->pdev, 2));
220 return -ENODEV; 213 return -ENODEV;
@@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
296 289
297 dev->caps.log_num_macs = log_num_mac; 290 dev->caps.log_num_macs = log_num_mac;
298 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 291 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
299 dev->caps.log_num_prios = use_prio ? 3 : 0;
300 292
301 for (i = 1; i <= dev->caps.num_ports; ++i) { 293 for (i = 1; i <= dev->caps.num_ports; ++i) {
302 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 294 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
347 339
348 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 340 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
349 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 341 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
350 mlx4_warn(dev, "Requested number of MACs is too much " 342 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
351 "for port %d, reducing to %d.\n",
352 i, 1 << dev->caps.log_num_macs); 343 i, 1 << dev->caps.log_num_macs);
353 } 344 }
354 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { 345 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
355 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; 346 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
356 mlx4_warn(dev, "Requested number of VLANs is too much " 347 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
357 "for port %d, reducing to %d.\n",
358 i, 1 << dev->caps.log_num_vlans); 348 i, 1 << dev->caps.log_num_vlans);
359 } 349 }
360 } 350 }
@@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
366 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 356 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
367 (1 << dev->caps.log_num_macs) * 357 (1 << dev->caps.log_num_macs) *
368 (1 << dev->caps.log_num_vlans) * 358 (1 << dev->caps.log_num_vlans) *
369 (1 << dev->caps.log_num_prios) *
370 dev->caps.num_ports; 359 dev->caps.num_ports;
371 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 360 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
372 361
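
With use_prio deprecated, the VLAN-priority factor drops out of the reserved-QP budget for FC address steering. To put numbers on it (illustrative values, using this file's defaults as I read them): with log_num_macs = 7 and log_num_vlans = 7 on a 2-port HCA, the new formula reserves 128 * 128 * 2 = 32768 QPs. The old formula multiplied in 1 << log_num_prios as well, which was a factor of 8 (262144 QPs) when use_prio was set and a factor of 1 otherwise; since use_prio defaulted to off, retiring the parameter leaves default behavior unchanged.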
@@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
584 memset(&hca_param, 0, sizeof(hca_param)); 573 memset(&hca_param, 0, sizeof(hca_param));
585 err = mlx4_QUERY_HCA(dev, &hca_param); 574 err = mlx4_QUERY_HCA(dev, &hca_param);
586 if (err) { 575 if (err) {
587 mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); 576 mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
588 return err; 577 return err;
589 } 578 }
590 579
591 /*fail if the hca has an unknown capability */ 580 /* fail if the hca has an unknown global capability
592 if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != 581 * at this time global_caps should be always zeroed
593 HCA_GLOBAL_CAP_MASK) { 582 */
583 if (hca_param.global_caps) {
594 mlx4_err(dev, "Unknown hca global capabilities\n"); 584 mlx4_err(dev, "Unknown hca global capabilities\n");
595 return -ENOSYS; 585 return -ENOSYS;
596 } 586 }
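
The HCA_GLOBAL_CAP_MASK indirection was dead weight once the mask is zero: (global_caps | 0) != 0 reduces to plain global_caps != 0. The rewrite states the current rule directly, and the new comment records the intent: no global capability bits are recognized yet, so any set bit is unknown to this driver and must fail the probe.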
@@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
603 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 593 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
604 err = mlx4_dev_cap(dev, &dev_cap); 594 err = mlx4_dev_cap(dev, &dev_cap);
605 if (err) { 595 if (err) {
606 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 596 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
607 return err; 597 return err;
608 } 598 }
609 599
610 err = mlx4_QUERY_FW(dev); 600 err = mlx4_QUERY_FW(dev);
611 if (err) 601 if (err)
612 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); 602 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
613 603
614 page_size = ~dev->caps.page_size_cap + 1; 604 page_size = ~dev->caps.page_size_cap + 1;
615 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 605 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
616 if (page_size > PAGE_SIZE) { 606 if (page_size > PAGE_SIZE) {
617 mlx4_err(dev, "HCA minimum page size of %d bigger than " 607 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
618 "kernel PAGE_SIZE of %ld, aborting.\n",
619 page_size, PAGE_SIZE); 608 page_size, PAGE_SIZE);
620 return -ENODEV; 609 return -ENODEV;
621 } 610 }
@@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
633 memset(&func_cap, 0, sizeof(func_cap)); 622 memset(&func_cap, 0, sizeof(func_cap));
634 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 623 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
635 if (err) { 624 if (err) {
636 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", 625 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
637 err); 626 err);
638 return err; 627 return err;
639 } 628 }
640 629
@@ -661,8 +650,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
661 dev->caps.num_amgms = 0; 650 dev->caps.num_amgms = 0;
662 651
663 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 652 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
664 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 653 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
665 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 654 dev->caps.num_ports, MLX4_MAX_PORTS);
666 return -ENODEV; 655 return -ENODEV;
667 } 656 }
668 657
@@ -682,8 +671,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
682 for (i = 1; i <= dev->caps.num_ports; ++i) { 671 for (i = 1; i <= dev->caps.num_ports; ++i) {
683 err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); 672 err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
684 if (err) { 673 if (err) {
685 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" 674 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
686 " port %d, aborting (%d).\n", i, err); 675 i, err);
687 goto err_mem; 676 goto err_mem;
688 } 677 }
689 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 678 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
@@ -702,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
702 if (dev->caps.uar_page_size * (dev->caps.num_uars - 691 if (dev->caps.uar_page_size * (dev->caps.num_uars -
703 dev->caps.reserved_uars) > 692 dev->caps.reserved_uars) >
704 pci_resource_len(dev->pdev, 2)) { 693 pci_resource_len(dev->pdev, 2)) {
705 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " 694 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
706 "PCI resource 2 size of 0x%llx, aborting.\n",
707 dev->caps.uar_page_size * dev->caps.num_uars, 695 dev->caps.uar_page_size * dev->caps.num_uars,
708 (unsigned long long) pci_resource_len(dev->pdev, 2)); 696 (unsigned long long) pci_resource_len(dev->pdev, 2));
709 goto err_mem; 697 goto err_mem;
@@ -725,7 +713,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
725 } 713 }
726 714
727 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 715 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
728 mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); 716 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
729 717
730 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 718 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
731 719
@@ -791,8 +779,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
791 dev->caps.port_type[port] = port_types[port - 1]; 779 dev->caps.port_type[port] = port_types[port - 1];
792 err = mlx4_SET_PORT(dev, port, -1); 780 err = mlx4_SET_PORT(dev, port, -1);
793 if (err) { 781 if (err) {
794 mlx4_err(dev, "Failed to set port %d, " 782 mlx4_err(dev, "Failed to set port %d, aborting\n",
795 "aborting\n", port); 783 port);
796 goto out; 784 goto out;
797 } 785 }
798 } 786 }
@@ -875,9 +863,7 @@ static ssize_t set_port_type(struct device *dev,
875 } 863 }
876 } 864 }
877 if (err) { 865 if (err) {
878 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 866 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
879 "Set only 'eth' or 'ib' for both ports "
880 "(should be the same)\n");
881 goto out; 867 goto out;
882 } 868 }
883 869
@@ -982,8 +968,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
982 mlx4_CLOSE_PORT(mdev, port); 968 mlx4_CLOSE_PORT(mdev, port);
983 err = mlx4_SET_PORT(mdev, port, -1); 969 err = mlx4_SET_PORT(mdev, port, -1);
984 if (err) { 970 if (err) {
985 mlx4_err(mdev, "Failed to set port %d, " 971 mlx4_err(mdev, "Failed to set port %d, aborting\n",
986 "aborting\n", port); 972 port);
987 goto err_set_port; 973 goto err_set_port;
988 } 974 }
989 } 975 }
@@ -1002,19 +988,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
1002 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 988 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1003 GFP_HIGHUSER | __GFP_NOWARN, 0); 989 GFP_HIGHUSER | __GFP_NOWARN, 0);
1004 if (!priv->fw.fw_icm) { 990 if (!priv->fw.fw_icm) {
1005 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 991 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1006 return -ENOMEM; 992 return -ENOMEM;
1007 } 993 }
1008 994
1009 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 995 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1010 if (err) { 996 if (err) {
1011 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 997 mlx4_err(dev, "MAP_FA command failed, aborting\n");
1012 goto err_free; 998 goto err_free;
1013 } 999 }
1014 1000
1015 err = mlx4_RUN_FW(dev); 1001 err = mlx4_RUN_FW(dev);
1016 if (err) { 1002 if (err) {
1017 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 1003 mlx4_err(dev, "RUN_FW command failed, aborting\n");
1018 goto err_unmap_fa; 1004 goto err_unmap_fa;
1019 } 1005 }
1020 1006
@@ -1098,30 +1084,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1098 1084
1099 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1085 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1100 if (err) { 1086 if (err) {
1101 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 1087 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1102 return err; 1088 return err;
1103 } 1089 }
1104 1090
1105 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 1091 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1106 (unsigned long long) icm_size >> 10, 1092 (unsigned long long) icm_size >> 10,
1107 (unsigned long long) aux_pages << 2); 1093 (unsigned long long) aux_pages << 2);
1108 1094
1109 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1095 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1110 GFP_HIGHUSER | __GFP_NOWARN, 0); 1096 GFP_HIGHUSER | __GFP_NOWARN, 0);
1111 if (!priv->fw.aux_icm) { 1097 if (!priv->fw.aux_icm) {
1112 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 1098 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1113 return -ENOMEM; 1099 return -ENOMEM;
1114 } 1100 }
1115 1101
1116 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1102 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1117 if (err) { 1103 if (err) {
1118 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 1104 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1119 goto err_free_aux; 1105 goto err_free_aux;
1120 } 1106 }
1121 1107
1122 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1108 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1123 if (err) { 1109 if (err) {
1124 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 1110 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1125 goto err_unmap_aux; 1111 goto err_unmap_aux;
1126 } 1112 }
1127 1113
@@ -1132,7 +1118,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1132 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1118 init_hca->eqc_base, dev_cap->eqc_entry_sz,
1133 num_eqs, num_eqs, 0, 0); 1119 num_eqs, num_eqs, 0, 0);
1134 if (err) { 1120 if (err) {
1135 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 1121 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1136 goto err_unmap_cmpt; 1122 goto err_unmap_cmpt;
1137 } 1123 }
1138 1124
@@ -1153,7 +1139,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1153 dev->caps.num_mtts, 1139 dev->caps.num_mtts,
1154 dev->caps.reserved_mtts, 1, 0); 1140 dev->caps.reserved_mtts, 1, 0);
1155 if (err) { 1141 if (err) {
1156 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 1142 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1157 goto err_unmap_eq; 1143 goto err_unmap_eq;
1158 } 1144 }
1159 1145
@@ -1163,7 +1149,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1163 dev->caps.num_mpts, 1149 dev->caps.num_mpts,
1164 dev->caps.reserved_mrws, 1, 1); 1150 dev->caps.reserved_mrws, 1, 1);
1165 if (err) { 1151 if (err) {
1166 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 1152 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1167 goto err_unmap_mtt; 1153 goto err_unmap_mtt;
1168 } 1154 }
1169 1155
@@ -1174,7 +1160,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1174 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1160 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1175 0, 0); 1161 0, 0);
1176 if (err) { 1162 if (err) {
1177 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 1163 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1178 goto err_unmap_dmpt; 1164 goto err_unmap_dmpt;
1179 } 1165 }
1180 1166
@@ -1185,7 +1171,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1185 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1171 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1186 0, 0); 1172 0, 0);
1187 if (err) { 1173 if (err) {
1188 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 1174 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1189 goto err_unmap_qp; 1175 goto err_unmap_qp;
1190 } 1176 }
1191 1177
@@ -1196,7 +1182,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1196 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1182 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1197 0, 0); 1183 0, 0);
1198 if (err) { 1184 if (err) {
1199 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 1185 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1200 goto err_unmap_auxc; 1186 goto err_unmap_auxc;
1201 } 1187 }
1202 1188
@@ -1217,7 +1203,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1217 dev->caps.num_cqs, 1203 dev->caps.num_cqs,
1218 dev->caps.reserved_cqs, 0, 0); 1204 dev->caps.reserved_cqs, 0, 0);
1219 if (err) { 1205 if (err) {
1220 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 1206 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1221 goto err_unmap_rdmarc; 1207 goto err_unmap_rdmarc;
1222 } 1208 }
1223 1209
@@ -1227,7 +1213,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1227 dev->caps.num_srqs, 1213 dev->caps.num_srqs,
1228 dev->caps.reserved_srqs, 0, 0); 1214 dev->caps.reserved_srqs, 0, 0);
1229 if (err) { 1215 if (err) {
1230 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 1216 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1231 goto err_unmap_cq; 1217 goto err_unmap_cq;
1232 } 1218 }
1233 1219
@@ -1245,7 +1231,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1245 dev->caps.num_mgms + dev->caps.num_amgms, 1231 dev->caps.num_mgms + dev->caps.num_amgms,
1246 0, 0); 1232 0, 0);
1247 if (err) { 1233 if (err) {
1248 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 1234 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1249 goto err_unmap_srq; 1235 goto err_unmap_srq;
1250 } 1236 }
1251 1237
@@ -1322,7 +1308,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
1322 1308
1323 mutex_lock(&priv->cmd.slave_cmd_mutex); 1309 mutex_lock(&priv->cmd.slave_cmd_mutex);
1324 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1310 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
1325 mlx4_warn(dev, "Failed to close slave function.\n"); 1311 mlx4_warn(dev, "Failed to close slave function\n");
1326 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1312 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1327} 1313}
1328 1314
@@ -1420,7 +1406,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1420 u32 cmd_channel_ver; 1406 u32 cmd_channel_ver;
1421 1407
1422 if (atomic_read(&pf_loading)) { 1408 if (atomic_read(&pf_loading)) {
1423 mlx4_warn(dev, "PF is not ready. Deferring probe\n"); 1409 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
1424 return -EPROBE_DEFER; 1410 return -EPROBE_DEFER;
1425 } 1411 }
1426 1412
@@ -1433,8 +1419,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1433 * NUM_OF_RESET_RETRIES times before leaving.*/ 1419 * NUM_OF_RESET_RETRIES times before leaving.*/
1434 if (ret_from_reset) { 1420 if (ret_from_reset) {
1435 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1421 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1436 mlx4_warn(dev, "slave is currently in the " 1422 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
1437 "middle of FLR. Deferring probe.\n");
1438 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1423 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1439 return -EPROBE_DEFER; 1424 return -EPROBE_DEFER;
1440 } else 1425 } else
@@ -1448,8 +1433,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1448 1433
1449 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1434 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1450 MLX4_COMM_GET_IF_REV(slave_read)) { 1435 MLX4_COMM_GET_IF_REV(slave_read)) {
1451 mlx4_err(dev, "slave driver version is not supported" 1436 mlx4_err(dev, "slave driver version is not supported by the master\n");
1452 " by the master\n");
1453 goto err; 1437 goto err;
1454 } 1438 }
1455 1439
@@ -1527,8 +1511,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1527 1511
1528 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1512 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1529 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1513 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1530 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " 1514 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
1531 "set to use B0 steering. Falling back to A0 steering mode.\n");
1532 } 1515 }
1533 dev->oper_log_mgm_entry_size = 1516 dev->oper_log_mgm_entry_size =
1534 mlx4_log_num_mgm_entry_size > 0 ? 1517 mlx4_log_num_mgm_entry_size > 0 ?
@@ -1536,8 +1519,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1536 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1519 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1537 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1520 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1538 } 1521 }
1539 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " 1522 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
1540 "modparam log_num_mgm_entry_size = %d\n",
1541 mlx4_steering_mode_str(dev->caps.steering_mode), 1523 mlx4_steering_mode_str(dev->caps.steering_mode),
1542 dev->oper_log_mgm_entry_size, 1524 dev->oper_log_mgm_entry_size,
1543 mlx4_log_num_mgm_entry_size); 1525 mlx4_log_num_mgm_entry_size);
@@ -1571,15 +1553,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1571 err = mlx4_QUERY_FW(dev); 1553 err = mlx4_QUERY_FW(dev);
1572 if (err) { 1554 if (err) {
1573 if (err == -EACCES) 1555 if (err == -EACCES)
1574 mlx4_info(dev, "non-primary physical function, skipping.\n"); 1556 mlx4_info(dev, "non-primary physical function, skipping\n");
1575 else 1557 else
1576 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 1558 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
1577 return err; 1559 return err;
1578 } 1560 }
1579 1561
1580 err = mlx4_load_fw(dev); 1562 err = mlx4_load_fw(dev);
1581 if (err) { 1563 if (err) {
1582 mlx4_err(dev, "Failed to start FW, aborting.\n"); 1564 mlx4_err(dev, "Failed to start FW, aborting\n");
1583 return err; 1565 return err;
1584 } 1566 }
1585 1567
@@ -1591,7 +1573,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1591 1573
1592 err = mlx4_dev_cap(dev, &dev_cap); 1574 err = mlx4_dev_cap(dev, &dev_cap);
1593 if (err) { 1575 if (err) {
1594 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 1576 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
1595 goto err_stop_fw; 1577 goto err_stop_fw;
1596 } 1578 }
1597 1579
@@ -1632,7 +1614,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1632 1614
1633 err = mlx4_INIT_HCA(dev, &init_hca); 1615 err = mlx4_INIT_HCA(dev, &init_hca);
1634 if (err) { 1616 if (err) {
1635 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1617 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
1636 goto err_free_icm; 1618 goto err_free_icm;
1637 } 1619 }
1638 /* 1620 /*
@@ -1643,7 +1625,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1643 memset(&init_hca, 0, sizeof(init_hca)); 1625 memset(&init_hca, 0, sizeof(init_hca));
1644 err = mlx4_QUERY_HCA(dev, &init_hca); 1626 err = mlx4_QUERY_HCA(dev, &init_hca);
1645 if (err) { 1627 if (err) {
1646 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); 1628 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
1647 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1629 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1648 } else { 1630 } else {
1649 dev->caps.hca_core_clock = 1631 dev->caps.hca_core_clock =
@@ -1656,14 +1638,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1656 if (!dev->caps.hca_core_clock) { 1638 if (!dev->caps.hca_core_clock) {
1657 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1639 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1658 mlx4_err(dev, 1640 mlx4_err(dev,
1659 "HCA frequency is 0. Timestamping is not supported."); 1641 "HCA frequency is 0 - timestamping is not supported\n");
1660 } else if (map_internal_clock(dev)) { 1642 } else if (map_internal_clock(dev)) {
1661 /* 1643 /*
1662 * Map internal clock, 1644 * Map internal clock,
1663 * in case of failure disable timestamping 1645 * in case of failure disable timestamping
1664 */ 1646 */
1665 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1647 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1666 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n"); 1648 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
1667 } 1649 }
1668 } 1650 }
1669 } else { 1651 } else {
@@ -1690,7 +1672,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1690 1672
1691 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1673 err = mlx4_QUERY_ADAPTER(dev, &adapter);
1692 if (err) { 1674 if (err) {
1693 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 1675 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
1694 goto unmap_bf; 1676 goto unmap_bf;
1695 } 1677 }
1696 1678
@@ -1808,79 +1790,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1808 1790
1809 err = mlx4_init_uar_table(dev); 1791 err = mlx4_init_uar_table(dev);
1810 if (err) { 1792 if (err) {
1811 mlx4_err(dev, "Failed to initialize " 1793 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
1812 "user access region table, aborting.\n"); 1794 return err;
1813 return err;
1814 } 1795 }
1815 1796
1816 err = mlx4_uar_alloc(dev, &priv->driver_uar); 1797 err = mlx4_uar_alloc(dev, &priv->driver_uar);
1817 if (err) { 1798 if (err) {
1818 mlx4_err(dev, "Failed to allocate driver access region, " 1799 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
1819 "aborting.\n");
1820 goto err_uar_table_free; 1800 goto err_uar_table_free;
1821 } 1801 }
1822 1802
1823 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1803 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
1824 if (!priv->kar) { 1804 if (!priv->kar) {
1825 mlx4_err(dev, "Couldn't map kernel access region, " 1805 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
1826 "aborting.\n");
1827 err = -ENOMEM; 1806 err = -ENOMEM;
1828 goto err_uar_free; 1807 goto err_uar_free;
1829 } 1808 }
1830 1809
1831 err = mlx4_init_pd_table(dev); 1810 err = mlx4_init_pd_table(dev);
1832 if (err) { 1811 if (err) {
1833 mlx4_err(dev, "Failed to initialize " 1812 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
1834 "protection domain table, aborting.\n");
1835 goto err_kar_unmap; 1813 goto err_kar_unmap;
1836 } 1814 }
1837 1815
1838 err = mlx4_init_xrcd_table(dev); 1816 err = mlx4_init_xrcd_table(dev);
1839 if (err) { 1817 if (err) {
1840 mlx4_err(dev, "Failed to initialize " 1818 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
1841 "reliable connection domain table, aborting.\n");
1842 goto err_pd_table_free; 1819 goto err_pd_table_free;
1843 } 1820 }
1844 1821
1845 err = mlx4_init_mr_table(dev); 1822 err = mlx4_init_mr_table(dev);
1846 if (err) { 1823 if (err) {
1847 mlx4_err(dev, "Failed to initialize " 1824 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
1848 "memory region table, aborting.\n");
1849 goto err_xrcd_table_free; 1825 goto err_xrcd_table_free;
1850 } 1826 }
1851 1827
1852 if (!mlx4_is_slave(dev)) { 1828 if (!mlx4_is_slave(dev)) {
1853 err = mlx4_init_mcg_table(dev); 1829 err = mlx4_init_mcg_table(dev);
1854 if (err) { 1830 if (err) {
1855 mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); 1831 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
1856 goto err_mr_table_free; 1832 goto err_mr_table_free;
1857 } 1833 }
1858 } 1834 }
1859 1835
1860 err = mlx4_init_eq_table(dev); 1836 err = mlx4_init_eq_table(dev);
1861 if (err) { 1837 if (err) {
1862 mlx4_err(dev, "Failed to initialize " 1838 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
1863 "event queue table, aborting.\n");
1864 goto err_mcg_table_free; 1839 goto err_mcg_table_free;
1865 } 1840 }
1866 1841
1867 err = mlx4_cmd_use_events(dev); 1842 err = mlx4_cmd_use_events(dev);
1868 if (err) { 1843 if (err) {
1869 mlx4_err(dev, "Failed to switch to event-driven " 1844 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
1870 "firmware commands, aborting.\n");
1871 goto err_eq_table_free; 1845 goto err_eq_table_free;
1872 } 1846 }
1873 1847
1874 err = mlx4_NOP(dev); 1848 err = mlx4_NOP(dev);
1875 if (err) { 1849 if (err) {
1876 if (dev->flags & MLX4_FLAG_MSI_X) { 1850 if (dev->flags & MLX4_FLAG_MSI_X) {
1877 mlx4_warn(dev, "NOP command failed to generate MSI-X " 1851 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
1878 "interrupt IRQ %d).\n",
1879 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1852 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1880 mlx4_warn(dev, "Trying again without MSI-X.\n"); 1853 mlx4_warn(dev, "Trying again without MSI-X\n");
1881 } else { 1854 } else {
1882 mlx4_err(dev, "NOP command failed to generate interrupt " 1855 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
1883 "(IRQ %d), aborting.\n",
1884 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1856 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1885 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 1857 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
1886 } 1858 }
@@ -1892,28 +1864,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1892 1864
1893 err = mlx4_init_cq_table(dev); 1865 err = mlx4_init_cq_table(dev);
1894 if (err) { 1866 if (err) {
1895 mlx4_err(dev, "Failed to initialize " 1867 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
1896 "completion queue table, aborting.\n");
1897 goto err_cmd_poll; 1868 goto err_cmd_poll;
1898 } 1869 }
1899 1870
1900 err = mlx4_init_srq_table(dev); 1871 err = mlx4_init_srq_table(dev);
1901 if (err) { 1872 if (err) {
1902 mlx4_err(dev, "Failed to initialize " 1873 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
1903 "shared receive queue table, aborting.\n");
1904 goto err_cq_table_free; 1874 goto err_cq_table_free;
1905 } 1875 }
1906 1876
1907 err = mlx4_init_qp_table(dev); 1877 err = mlx4_init_qp_table(dev);
1908 if (err) { 1878 if (err) {
1909 mlx4_err(dev, "Failed to initialize " 1879 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
1910 "queue pair table, aborting.\n");
1911 goto err_srq_table_free; 1880 goto err_srq_table_free;
1912 } 1881 }
1913 1882
1914 err = mlx4_init_counters_table(dev); 1883 err = mlx4_init_counters_table(dev);
1915 if (err && err != -ENOENT) { 1884 if (err && err != -ENOENT) {
1916 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1885 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
1917 goto err_qp_table_free; 1886 goto err_qp_table_free;
1918 } 1887 }
1919 1888
@@ -1923,9 +1892,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1923 err = mlx4_get_port_ib_caps(dev, port, 1892 err = mlx4_get_port_ib_caps(dev, port,
1924 &ib_port_default_caps); 1893 &ib_port_default_caps);
1925 if (err) 1894 if (err)
1926 mlx4_warn(dev, "failed to get port %d default " 1895 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
1927 "ib capabilities (%d). Continuing " 1896 port, err);
1928 "with caps = 0\n", port, err);
1929 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 1897 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1930 1898
1931 /* initialize per-slave default ib port capabilities */ 1899 /* initialize per-slave default ib port capabilities */
@@ -1935,7 +1903,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1935 if (i == mlx4_master_func_num(dev)) 1903 if (i == mlx4_master_func_num(dev))
1936 continue; 1904 continue;
1937 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 1905 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
1938 ib_port_default_caps; 1906 ib_port_default_caps;
1939 } 1907 }
1940 } 1908 }
1941 1909
@@ -1948,7 +1916,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1948 dev->caps.pkey_table_len[port] : -1); 1916 dev->caps.pkey_table_len[port] : -1);
1949 if (err) { 1917 if (err) {
1950 mlx4_err(dev, "Failed to set port %d, aborting\n", 1918 mlx4_err(dev, "Failed to set port %d, aborting\n",
1951 port); 1919 port);
1952 goto err_counters_table_free; 1920 goto err_counters_table_free;
1953 } 1921 }
1954 } 1922 }
@@ -2024,7 +1992,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2024 kfree(entries); 1992 kfree(entries);
2025 goto no_msi; 1993 goto no_msi;
2026 } else if (nreq < MSIX_LEGACY_SZ + 1994 } else if (nreq < MSIX_LEGACY_SZ +
2027 dev->caps.num_ports * MIN_MSIX_P_PORT) { 1995 dev->caps.num_ports * MIN_MSIX_P_PORT) {
2028 /*Working in legacy mode , all EQ's shared*/ 1996 /*Working in legacy mode , all EQ's shared*/
2029 dev->caps.comp_pool = 0; 1997 dev->caps.comp_pool = 0;
2030 dev->caps.num_comp_vectors = nreq - 1; 1998 dev->caps.num_comp_vectors = nreq - 1;
@@ -2225,8 +2193,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2225 2193
2226 err = pci_enable_device(pdev); 2194 err = pci_enable_device(pdev);
2227 if (err) { 2195 if (err) {
2228 dev_err(&pdev->dev, "Cannot enable PCI device, " 2196 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
2229 "aborting.\n");
2230 return err; 2197 return err;
2231 } 2198 }
2232 2199
@@ -2273,14 +2240,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2273 */ 2240 */
2274 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 2241 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
2275 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2242 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2276 dev_err(&pdev->dev, "Missing DCS, aborting." 2243 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
2277 "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
2278 pci_dev_data, pci_resource_flags(pdev, 0)); 2244 pci_dev_data, pci_resource_flags(pdev, 0));
2279 err = -ENODEV; 2245 err = -ENODEV;
2280 goto err_disable_pdev; 2246 goto err_disable_pdev;
2281 } 2247 }
2282 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 2248 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2283 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 2249 dev_err(&pdev->dev, "Missing UAR, aborting\n");
2284 err = -ENODEV; 2250 err = -ENODEV;
2285 goto err_disable_pdev; 2251 goto err_disable_pdev;
2286 } 2252 }
@@ -2295,21 +2261,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2295 2261
2296 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2262 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2297 if (err) { 2263 if (err) {
2298 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 2264 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
2299 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2265 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2300 if (err) { 2266 if (err) {
2301 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 2267 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
2302 goto err_release_regions; 2268 goto err_release_regions;
2303 } 2269 }
2304 } 2270 }
2305 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2271 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2306 if (err) { 2272 if (err) {
2307 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 2273 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
2308 "consistent PCI DMA mask.\n");
2309 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2274 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2310 if (err) { 2275 if (err) {
2311 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 2276 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
2312 "aborting.\n");
2313 goto err_release_regions; 2277 goto err_release_regions;
2314 } 2278 }
2315 } 2279 }
@@ -2340,7 +2304,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2340 if (total_vfs) { 2304 if (total_vfs) {
2341 unsigned vfs_offset = 0; 2305 unsigned vfs_offset = 0;
2342 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 2306 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
2343 vfs_offset + nvfs[i] < extended_func_num(pdev); 2307 vfs_offset + nvfs[i] < extended_func_num(pdev);
2344 vfs_offset += nvfs[i], i++) 2308 vfs_offset += nvfs[i], i++)
2345 ; 2309 ;
2346 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 2310 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2366,8 +2330,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2366 if (err < 0) 2330 if (err < 0)
2367 goto err_free_dev; 2331 goto err_free_dev;
2368 else { 2332 else {
2369 mlx4_warn(dev, "Multiple PFs not yet supported." 2333 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
2370 " Skipping PF.\n");
2371 err = -EINVAL; 2334 err = -EINVAL;
2372 goto err_free_dev; 2335 goto err_free_dev;
2373 } 2336 }
@@ -2377,8 +2340,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2377 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", 2340 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2378 total_vfs); 2341 total_vfs);
2379 dev->dev_vfs = kzalloc( 2342 dev->dev_vfs = kzalloc(
2380 total_vfs * sizeof(*dev->dev_vfs), 2343 total_vfs * sizeof(*dev->dev_vfs),
2381 GFP_KERNEL); 2344 GFP_KERNEL);
2382 if (NULL == dev->dev_vfs) { 2345 if (NULL == dev->dev_vfs) {
2383 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2346 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2384 err = 0; 2347 err = 0;
@@ -2386,14 +2349,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2386 atomic_inc(&pf_loading); 2349 atomic_inc(&pf_loading);
2387 err = pci_enable_sriov(pdev, total_vfs); 2350 err = pci_enable_sriov(pdev, total_vfs);
2388 if (err) { 2351 if (err) {
2389 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2352 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
2390 err); 2353 err);
2391 atomic_dec(&pf_loading); 2354 atomic_dec(&pf_loading);
2392 err = 0; 2355 err = 0;
2393 } else { 2356 } else {
2394 mlx4_warn(dev, "Running in master mode\n"); 2357 mlx4_warn(dev, "Running in master mode\n");
2395 dev->flags |= MLX4_FLAG_SRIOV | 2358 dev->flags |= MLX4_FLAG_SRIOV |
2396 MLX4_FLAG_MASTER; 2359 MLX4_FLAG_MASTER;
2397 dev->num_vfs = total_vfs; 2360 dev->num_vfs = total_vfs;
2398 sriov_initialized = 1; 2361 sriov_initialized = 1;
2399 } 2362 }
@@ -2410,7 +2373,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2410 */ 2373 */
2411 err = mlx4_reset(dev); 2374 err = mlx4_reset(dev);
2412 if (err) { 2375 if (err) {
2413 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2376 mlx4_err(dev, "Failed to reset HCA, aborting\n");
2414 goto err_rel_own; 2377 goto err_rel_own;
2415 } 2378 }
2416 } 2379 }
@@ -2418,7 +2381,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2418 slave_start: 2381 slave_start:
2419 err = mlx4_cmd_init(dev); 2382 err = mlx4_cmd_init(dev);
2420 if (err) { 2383 if (err) {
2421 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 2384 mlx4_err(dev, "Failed to init command interface, aborting\n");
2422 goto err_sriov; 2385 goto err_sriov;
2423 } 2386 }
2424 2387
@@ -2432,8 +2395,7 @@ slave_start:
2432 dev->num_slaves = 0; 2395 dev->num_slaves = 0;
2433 err = mlx4_multi_func_init(dev); 2396 err = mlx4_multi_func_init(dev);
2434 if (err) { 2397 if (err) {
2435 mlx4_err(dev, "Failed to init slave mfunc" 2398 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
2436 " interface, aborting.\n");
2437 goto err_cmd; 2399 goto err_cmd;
2438 } 2400 }
2439 } 2401 }
@@ -2465,8 +2427,7 @@ slave_start:
2465 unsigned sum = 0; 2427 unsigned sum = 0;
2466 err = mlx4_multi_func_init(dev); 2428 err = mlx4_multi_func_init(dev);
2467 if (err) { 2429 if (err) {
2468 mlx4_err(dev, "Failed to init master mfunc" 2430 mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
2469 "interface, aborting.\n");
2470 goto err_close; 2431 goto err_close;
2471 } 2432 }
2472 if (sriov_initialized) { 2433 if (sriov_initialized) {
@@ -2477,10 +2438,7 @@ slave_start:
2477 if (ib_ports && 2438 if (ib_ports &&
2478 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2439 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2479 mlx4_err(dev, 2440 mlx4_err(dev,
2480 "Invalid syntax of num_vfs/probe_vfs " 2441 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
2481 "with IB port. Single port VFs syntax"
2482 " is only supported when all ports "
2483 "are configured as ethernet\n");
2484 goto err_close; 2442 goto err_close;
2485 } 2443 }
2486 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { 2444 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2506,8 +2464,7 @@ slave_start:
2506 if ((mlx4_is_mfunc(dev)) && 2464 if ((mlx4_is_mfunc(dev)) &&
2507 !(dev->flags & MLX4_FLAG_MSI_X)) { 2465 !(dev->flags & MLX4_FLAG_MSI_X)) {
2508 err = -ENOSYS; 2466 err = -ENOSYS;
2509 mlx4_err(dev, "INTx is not supported in multi-function mode." 2467 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
2510 " aborting.\n");
2511 goto err_free_eq; 2468 goto err_free_eq;
2512 } 2469 }
2513 2470
@@ -2660,7 +2617,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
2660 /* in SRIOV it is not allowed to unload the pf's 2617 /* in SRIOV it is not allowed to unload the pf's
2661 * driver while there are alive vf's */ 2618 * driver while there are alive vf's */
2662 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) 2619 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
2663 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); 2620 pr_warn("Removing PF when there are assigned VF's !!!\n");
2664 mlx4_stop_sense(dev); 2621 mlx4_stop_sense(dev);
2665 mlx4_unregister_device(dev); 2622 mlx4_unregister_device(dev);
2666 2623
@@ -2824,7 +2781,7 @@ static struct pci_driver mlx4_driver = {
2824 .name = DRV_NAME, 2781 .name = DRV_NAME,
2825 .id_table = mlx4_pci_table, 2782 .id_table = mlx4_pci_table,
2826 .probe = mlx4_init_one, 2783 .probe = mlx4_init_one,
2827 .shutdown = mlx4_remove_one, 2784 .shutdown = __mlx4_remove_one,
2828 .remove = mlx4_remove_one, 2785 .remove = mlx4_remove_one,
2829 .err_handler = &mlx4_err_handler, 2786 .err_handler = &mlx4_err_handler,
2830 }; 2787 };
@@ -2832,33 +2789,36 @@ static struct pci_driver mlx4_driver = {
2832 static int __init mlx4_verify_params(void) 2789 static int __init mlx4_verify_params(void)
2833 { 2790 {
2834 if ((log_num_mac < 0) || (log_num_mac > 7)) { 2791 if ((log_num_mac < 0) || (log_num_mac > 7)) {
2835 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); 2792 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
2836 return -1; 2793 return -1;
2837 } 2794 }
2838 2795
2839 if (log_num_vlan != 0) 2796 if (log_num_vlan != 0)
2840 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 2797 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
2841 MLX4_LOG_NUM_VLANS); 2798 MLX4_LOG_NUM_VLANS);
2799
2800 if (use_prio != 0)
2801 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
2842 2802
2843 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 2803 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
2844 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 2804 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
2805 log_mtts_per_seg);
2845 return -1; 2806 return -1;
2846 } 2807 }
2847 2808
2848 /* Check if module param for ports type has legal combination */ 2809 /* Check if module param for ports type has legal combination */
2849 if (port_type_array[0] == false && port_type_array[1] == true) { 2810 if (port_type_array[0] == false && port_type_array[1] == true) {
2850 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 2811 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
2851 port_type_array[0] = true; 2812 port_type_array[0] = true;
2852 } 2813 }
2853 2814
2854 if (mlx4_log_num_mgm_entry_size != -1 && 2815 if (mlx4_log_num_mgm_entry_size != -1 &&
2855 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 2816 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
2856 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { 2817 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
2857 pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " 2818 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
2858 "in legal range (-1 or %d..%d)\n", 2819 mlx4_log_num_mgm_entry_size,
2859 mlx4_log_num_mgm_entry_size, 2820 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
2860 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 2821 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2861 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2862 return -1; 2822 return -1;
2863 } 2823 }
2864 2824
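Nearly all of the main.c hunks above apply one mechanical cleanup: a format string that had been split across source lines is joined into a single literal, so the whole message can be found with one grep of the tree, and the trailing period before the newline is dropped. A before/after fragment (not compilable on its own) showing the shape of the change:

/* before: the message cannot be found with a single grep */
mlx4_err(dev, "Failed to initialize "
	 "user access region table, aborting.\n");

/* after: one greppable literal; checkpatch tolerates the long line */
mlx4_err(dev, "Failed to initialize user access region table, aborting\n");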
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4edf825..4c36def8e10f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
638 638
639 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 639 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
640 if (*index != hash) { 640 if (*index != hash) {
641 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 641 mlx4_err(dev, "Found zero MGID in AMGM\n");
642 err = -EINVAL; 642 err = -EINVAL;
643 } 643 }
644 return err; 644 return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
874 mlx4_err(dev, "%s", buf); 874 mlx4_err(dev, "%s", buf);
875 875
876 if (len >= BUF_SIZE) 876 if (len >= BUF_SIZE)
877 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 877 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
878 } 878 }
879 879
880 int mlx4_flow_attach(struct mlx4_dev *dev, 880 int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
897 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 897 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
898 if (ret < 0) { 898 if (ret < 0) {
899 mlx4_free_cmd_mailbox(dev, mailbox); 899 mlx4_free_cmd_mailbox(dev, mailbox);
900 return -EINVAL; 900 return ret;
901 } 901 }
902 size += ret; 902 size += ret;
903 } 903 }
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
905 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 905 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
906 if (ret == -ENOMEM) 906 if (ret == -ENOMEM)
907 mlx4_err_rule(dev, 907 mlx4_err_rule(dev,
908 "mcg table is full. Fail to register network rule.\n", 908 "mcg table is full. Fail to register network rule\n",
909 rule); 909 rule);
910 else if (ret) 910 else if (ret)
911 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 911 mlx4_err_rule(dev, "Fail to register network rule\n", rule);
912 912
913 mlx4_free_cmd_mailbox(dev, mailbox); 913 mlx4_free_cmd_mailbox(dev, mailbox);
914 914
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
994 994
995 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 995 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
996 if (members_count == dev->caps.num_qp_per_mgm) { 996 if (members_count == dev->caps.num_qp_per_mgm) {
997 mlx4_err(dev, "MGM at index %x is full.\n", index); 997 mlx4_err(dev, "MGM at index %x is full\n", index);
998 err = -ENOMEM; 998 err = -ENOMEM;
999 goto out; 999 goto out;
1000 } 1000 }
@@ -1042,7 +1042,7 @@ out:
1042 } 1042 }
1043 if (err && link && index != -1) { 1043 if (err && link && index != -1) {
1044 if (index < dev->caps.num_mgms) 1044 if (index < dev->caps.num_mgms)
1045 mlx4_warn(dev, "Got AMGM index %d < %d", 1045 mlx4_warn(dev, "Got AMGM index %d < %d\n",
1046 index, dev->caps.num_mgms); 1046 index, dev->caps.num_mgms);
1047 else 1047 else
1048 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1048 mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1133 1133
1134 if (amgm_index) { 1134 if (amgm_index) {
1135 if (amgm_index < dev->caps.num_mgms) 1135 if (amgm_index < dev->caps.num_mgms)
1136 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1136 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
1137 index, amgm_index, dev->caps.num_mgms); 1137 index, amgm_index, dev->caps.num_mgms);
1138 else 1138 else
1139 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1139 mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1153 goto out; 1153 goto out;
1154 1154
1155 if (index < dev->caps.num_mgms) 1155 if (index < dev->caps.num_mgms)
1156 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1156 mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
1157 prev, index, dev->caps.num_mgms); 1157 prev, index, dev->caps.num_mgms);
1158 else 1158 else
1159 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1159 mlx4_bitmap_free(&priv->mcg_table.bitmap,
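One mcg.c change above is behavioral rather than cosmetic: mlx4_flow_attach() now propagates the error from parse_trans_rule() instead of flattening every failure to -EINVAL, so callers can still distinguish cases such as -ENOMEM. The relevant fragment, as it reads after the patch:

ret = parse_trans_rule(dev, cur, mailbox->buf + size);
if (ret < 0) {
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;	/* was: return -EINVAL */
}
size += ret;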
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7a0665beebb1..1d8af7336807 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -221,18 +221,19 @@ extern int mlx4_debug_level;
221 #define mlx4_debug_level (0) 221 #define mlx4_debug_level (0)
222 #endif /* CONFIG_MLX4_DEBUG */ 222 #endif /* CONFIG_MLX4_DEBUG */
223 223
224 #define mlx4_dbg(mdev, format, arg...) \ 224 #define mlx4_dbg(mdev, format, ...) \
225 do { \ 225 do { \
226 if (mlx4_debug_level) \ 226 if (mlx4_debug_level) \
227 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ 227 dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
228 ##__VA_ARGS__); \
228 } while (0) 229 } while (0)
229 230
230 #define mlx4_err(mdev, format, arg...) \ 231 #define mlx4_err(mdev, format, ...) \
231 dev_err(&mdev->pdev->dev, format, ##arg) 232 dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
232 #define mlx4_info(mdev, format, arg...) \ 233 #define mlx4_info(mdev, format, ...) \
233 dev_info(&mdev->pdev->dev, format, ##arg) 234 dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
234 #define mlx4_warn(mdev, format, arg...) \ 235 #define mlx4_warn(mdev, format, ...) \
235 dev_warn(&mdev->pdev->dev, format, ##arg) 236 dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
236 237
237 extern int mlx4_log_num_mgm_entry_size; 238 extern int mlx4_log_num_mgm_entry_size;
238 extern int log_mtts_per_seg; 239 extern int log_mtts_per_seg;
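The mlx4.h hunk above makes two independent macro fixes: the GNU named variadic parameter (arg... with ##arg) becomes C99-style ... with ##__VA_ARGS__, and the mdev argument is wrapped in parentheses so the macros expand correctly when handed an expression rather than a plain identifier. A self-contained userspace sketch of both fixes; my_dbg and my_priv are hypothetical names, not driver API:

#include <stdio.h>

struct my_priv { int id; };

/* C99 variadic macro; ##__VA_ARGS__ (a GNU/clang extension the kernel
 * relies on) drops the leading comma when no arguments follow fmt.
 * Parenthesizing (priv) keeps e.g. my_dbg(flag ? a : b, ...) well formed. */
#define my_dbg(priv, fmt, ...) \
	fprintf(stderr, "dev%d: " fmt, (priv)->id, ##__VA_ARGS__)

int main(void)
{
	struct my_priv p = { .id = 3 };

	my_dbg(&p, "loaded, %d ports\n", 2);	/* -> "dev3: loaded, 2 ports" */
	my_dbg(&p, "link up\n");		/* no varargs: comma swallowed */
	return 0;
}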
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe3e80..0e15295bedd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -313,6 +313,7 @@ struct mlx4_en_rx_ring {
313 unsigned long csum_ok; 313 unsigned long csum_ok;
314 unsigned long csum_none; 314 unsigned long csum_none;
315 int hwtstamp_rx_filter; 315 int hwtstamp_rx_filter;
316 cpumask_var_t affinity_mask;
316 }; 317 };
317 318
318 struct mlx4_en_cq { 319 struct mlx4_en_cq {
@@ -830,26 +831,26 @@ __printf(3, 4)
830 int en_print(const char *level, const struct mlx4_en_priv *priv, 831 int en_print(const char *level, const struct mlx4_en_priv *priv,
831 const char *format, ...); 832 const char *format, ...);
832 833
833 #define en_dbg(mlevel, priv, format, arg...) \ 834 #define en_dbg(mlevel, priv, format, ...) \
834 do { \ 835 do { \
835 if (NETIF_MSG_##mlevel & priv->msg_enable) \ 836 if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
836 en_print(KERN_DEBUG, priv, format, ##arg); \ 837 en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
837 } while (0) 838 } while (0)
838 #define en_warn(priv, format, arg...) \ 839 #define en_warn(priv, format, ...) \
839 en_print(KERN_WARNING, priv, format, ##arg) 840 en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
840 #define en_err(priv, format, arg...) \ 841 #define en_err(priv, format, ...) \
841 en_print(KERN_ERR, priv, format, ##arg) 842 en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
842 #define en_info(priv, format, arg...) \ 843 #define en_info(priv, format, ...) \
843 en_print(KERN_INFO, priv, format, ## arg) 844 en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
844 845
845 #define mlx4_err(mdev, format, arg...) \ 846 #define mlx4_err(mdev, format, ...) \
846 pr_err("%s %s: " format, DRV_NAME, \ 847 pr_err(DRV_NAME " %s: " format, \
847 dev_name(&mdev->pdev->dev), ##arg) 848 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
848 #define mlx4_info(mdev, format, arg...) \ 849 #define mlx4_info(mdev, format, ...) \
849 pr_info("%s %s: " format, DRV_NAME, \ 850 pr_info(DRV_NAME " %s: " format, \
850 dev_name(&mdev->pdev->dev), ##arg) 851 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
851 #define mlx4_warn(mdev, format, arg...) \ 852 #define mlx4_warn(mdev, format, ...) \
852 pr_warning("%s %s: " format, DRV_NAME, \ 853 pr_warn(DRV_NAME " %s: " format, \
853 dev_name(&mdev->pdev->dev), ##arg) 854 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
854 855
855 #endif 856 #endif
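Besides the same __VA_ARGS__ conversion, struct mlx4_en_rx_ring gains a cpumask_var_t affinity_mask above; the hunk adds only the field, not its users. As a hedged sketch of how such a mask is typically managed in-kernel (example_ring and both helpers are illustrative stand-ins, not mlx4_en code):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

struct example_ring {
	int irq;
	cpumask_var_t affinity_mask;
};

/* Hypothetical helper: steer a ring's IRQ toward one CPU via a hint. */
static int example_ring_set_affinity(struct example_ring *ring, int cpu)
{
	/* only allocates when CONFIG_CPUMASK_OFFSTACK is set */
	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(cpu, ring->affinity_mask);
	return irq_set_affinity_hint(ring->irq, ring->affinity_mask);
}

static void example_ring_clear_affinity(struct example_ring *ring)
{
	irq_set_affinity_hint(ring->irq, NULL);	/* drop hint before freeing */
	free_cpumask_var(ring->affinity_mask);
}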
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 4c71dafad217..2839abb878a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
250 MLX4_CMD_TIME_CLASS_A, 250 MLX4_CMD_TIME_CLASS_A,
251 MLX4_CMD_WRAPPED); 251 MLX4_CMD_WRAPPED);
252 if (err) 252 if (err)
253 mlx4_warn(dev, "Failed to free mtt range at:" 253 mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
254 "%d order:%d\n", offset, order); 254 offset, order);
255 return; 255 return;
256 } 256 }
257 __mlx4_free_mtt_range(dev, offset, order); 257 __mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
436 key_to_hw_index(mr->key) & 436 key_to_hw_index(mr->key) &
437 (dev->caps.num_mpts - 1)); 437 (dev->caps.num_mpts - 1));
438 if (err) { 438 if (err) {
439 mlx4_warn(dev, "HW2SW_MPT failed (%d),", err); 439 mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
440 mlx4_warn(dev, "MR has MWs bound to it.\n"); 440 err);
441 return err; 441 return err;
442 } 442 }
443 443
@@ -774,7 +774,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
774 mlx4_alloc_mtt_range(dev, 774 mlx4_alloc_mtt_range(dev,
775 fls(dev->caps.reserved_mtts - 1)); 775 fls(dev->caps.reserved_mtts - 1));
776 if (priv->reserved_mtts < 0) { 776 if (priv->reserved_mtts < 0) {
777 mlx4_warn(dev, "MTT table of order %u is too small.\n", 777 mlx4_warn(dev, "MTT table of order %u is too small\n",
778 mr_table->mtt_buddy.max_order); 778 mr_table->mtt_buddy.max_order);
779 err = -ENOMEM; 779 err = -ENOMEM;
780 goto err_reserve_mtts; 780 goto err_reserve_mtts;
@@ -955,8 +955,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
955 mailbox = mlx4_alloc_cmd_mailbox(dev); 955 mailbox = mlx4_alloc_cmd_mailbox(dev);
956 if (IS_ERR(mailbox)) { 956 if (IS_ERR(mailbox)) {
957 err = PTR_ERR(mailbox); 957 err = PTR_ERR(mailbox);
958 printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" 958 pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
959 " failed (%d)\n", err);
960 return; 959 return;
961 } 960 }
962 961
@@ -965,8 +964,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
965 (dev->caps.num_mpts - 1)); 964 (dev->caps.num_mpts - 1));
966 mlx4_free_cmd_mailbox(dev, mailbox); 965 mlx4_free_cmd_mailbox(dev, mailbox);
967 if (err) { 966 if (err) {
968 printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", 967 pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
969 err);
970 return; 968 return;
971 } 969 }
972 fmr->mr.enabled = MLX4_MPT_EN_SW; 970 fmr->mr.enabled = MLX4_MPT_EN_SW;
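The printk conversions running through mr.c — and through main.c above and resource_tracker.c below — follow one rule: raw printk(KERN_WARNING/KERN_ERR ...) and the deprecated pr_warning() become pr_warn()/pr_err(), and an unconditional printk(KERN_DEBUG ...) becomes pr_devel(), which compiles to nothing unless DEBUG is defined. Before/after, quoted from the mlx4_fmr_unmap() hunk:

/* before */
printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);

/* after */
pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);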
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 5ec6f203c6e6..7ab97174886d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -254,8 +254,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
254 if (validate_index(dev, table, index)) 254 if (validate_index(dev, table, index))
255 goto out; 255 goto out;
256 if (--table->refs[index]) { 256 if (--table->refs[index]) {
257 mlx4_dbg(dev, "Have more references for index %d," 257 mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
258 "no need to modify mac table\n", index); 258 index);
259 goto out; 259 goto out;
260 } 260 }
261 261
@@ -453,9 +453,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
453 } 453 }
454 454
455 if (--table->refs[index]) { 455 if (--table->refs[index]) {
456 mlx4_dbg(dev, "Have %d more references for index %d," 456 mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
457 "no need to modify vlan table\n", table->refs[index], 457 table->refs[index], index);
458 index);
459 goto out; 458 goto out;
460 } 459 }
461 table->entries[index] = 0; 460 table->entries[index] = 0;
@@ -796,8 +795,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
796 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, 795 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
797 sizeof(gid_entry_tbl->raw))) { 796 sizeof(gid_entry_tbl->raw))) {
798 /* found duplicate */ 797 /* found duplicate */
799 mlx4_warn(dev, "requested gid entry for slave:%d " 798 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
800 "is a duplicate of gid at index %d\n",
801 slave, i); 799 slave, i);
802 mutex_unlock(&(priv->port[port].gid_table.mutex)); 800 mutex_unlock(&(priv->port[port].gid_table.mutex));
803 return -EINVAL; 801 return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc2a1ec..14089d9e1667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
164 } 164 }
165 165
166 if (total_size > dev_cap->max_icm_sz) { 166 if (total_size > dev_cap->max_icm_sz) {
167 mlx4_err(dev, "Profile requires 0x%llx bytes; " 167 mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
168 "won't fit in 0x%llx bytes of context memory.\n", 168 (unsigned long long) total_size,
169 (unsigned long long) total_size, 169 (unsigned long long) dev_cap->max_icm_sz);
170 (unsigned long long) dev_cap->max_icm_sz);
171 kfree(profile); 170 kfree(profile);
172 return -ENOMEM; 171 return -ENOMEM;
173 } 172 }
174 173
175 if (profile[i].size) 174 if (profile[i].size)
176 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, " 175 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
177 "size 0x%10llx\n", 176 i, res_name[profile[i].type],
178 i, res_name[profile[i].type], profile[i].log_num, 177 profile[i].log_num,
179 (unsigned long long) profile[i].start, 178 (unsigned long long) profile[i].start,
180 (unsigned long long) profile[i].size); 179 (unsigned long long) profile[i].size);
181 } 180 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 40af61947925..0dc31d85fc3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
264 MLX4_CMD_FREE_RES, 264 MLX4_CMD_FREE_RES,
265 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 265 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
266 if (err) { 266 if (err) {
267 mlx4_warn(dev, "Failed to release qp range" 267 mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
268 " base:%d cnt:%d\n", base_qpn, cnt); 268 base_qpn, cnt);
269 } 269 }
270 } else 270 } else
271 __mlx4_qp_release_range(dev, base_qpn, cnt); 271 __mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -612,8 +612,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
612 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], 612 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
613 context, 0, 0, qp); 613 context, 0, 0, qp);
614 if (err) { 614 if (err) {
615 mlx4_err(dev, "Failed to bring QP to state: " 615 mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
616 "%d with error: %d\n",
617 states[i + 1], err); 616 states[i + 1], err);
618 return err; 617 return err;
619 } 618 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b5093d8b1..ea1c6d092145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
72 hca_header = kmalloc(256, GFP_KERNEL); 72 hca_header = kmalloc(256, GFP_KERNEL);
73 if (!hca_header) { 73 if (!hca_header) {
74 err = -ENOMEM; 74 err = -ENOMEM;
75 mlx4_err(dev, "Couldn't allocate memory to save HCA " 75 mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
76 "PCI header, aborting.\n");
77 goto out; 76 goto out;
78 } 77 }
79 78
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
84 continue; 83 continue;
85 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { 84 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
86 err = -ENODEV; 85 err = -ENODEV;
87 mlx4_err(dev, "Couldn't save HCA " 86 mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
88 "PCI header, aborting.\n");
89 goto out; 87 goto out;
90 } 88 }
91 } 89 }
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
94 MLX4_RESET_SIZE); 92 MLX4_RESET_SIZE);
95 if (!reset) { 93 if (!reset) {
96 err = -ENOMEM; 94 err = -ENOMEM;
97 mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n"); 95 mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
98 goto out; 96 goto out;
99 } 97 }
100 98
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
133 131
134 if (vendor == 0xffff) { 132 if (vendor == 0xffff) {
135 err = -ENODEV; 133 err = -ENODEV;
136 mlx4_err(dev, "PCI device did not come back after reset, " 134 mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
137 "aborting.\n");
138 goto out; 135 goto out;
139 } 136 }
140 137
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
144 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, 141 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
145 devctl)) { 142 devctl)) {
146 err = -ENODEV; 143 err = -ENODEV;
147 mlx4_err(dev, "Couldn't restore HCA PCI Express " 144 mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
148 "Device Control register, aborting.\n");
149 goto out; 145 goto out;
150 } 146 }
151 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; 147 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
152 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, 148 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
153 linkctl)) { 149 linkctl)) {
154 err = -ENODEV; 150 err = -ENODEV;
155 mlx4_err(dev, "Couldn't restore HCA PCI Express " 151 mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
156 "Link control register, aborting.\n");
157 goto out; 152 goto out;
158 } 153 }
159 } 154 }
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
164 159
165 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { 160 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
166 err = -ENODEV; 161 err = -ENODEV;
167 mlx4_err(dev, "Couldn't restore HCA reg %x, " 162 mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
168 "aborting.\n", i); 163 i);
169 goto out; 164 goto out;
170 } 165 }
171 } 166 }
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
173 if (pci_write_config_dword(dev->pdev, PCI_COMMAND, 168 if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
174 hca_header[PCI_COMMAND / 4])) { 169 hca_header[PCI_COMMAND / 4])) {
175 err = -ENODEV; 170 err = -ENODEV;
176 mlx4_err(dev, "Couldn't restore HCA COMMAND, " 171 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
177 "aborting.\n");
178 goto out; 172 goto out;
179 } 173 }
180 174
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2ba3b7623960..0efc1368e5a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -279,7 +279,7 @@ enum qp_transition {
279 }; 279 };
280 280
281 /* For Debug uses */ 281 /* For Debug uses */
282 static const char *ResourceType(enum mlx4_resource rt) 282 static const char *resource_str(enum mlx4_resource rt)
283 { 283 {
284 switch (rt) { 284 switch (rt) {
285 case RES_QP: return "RES_QP"; 285 case RES_QP: return "RES_QP";
@@ -307,6 +307,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
307 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 307 &priv->mfunc.master.res_tracker.res_alloc[res_type];
308 int err = -EINVAL; 308 int err = -EINVAL;
309 int allocated, free, reserved, guaranteed, from_free; 309 int allocated, free, reserved, guaranteed, from_free;
310 int from_rsvd;
310 311
311 if (slave > dev->num_vfs) 312 if (slave > dev->num_vfs)
312 return -EINVAL; 313 return -EINVAL;
@@ -321,11 +322,16 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
321 res_alloc->res_reserved; 322 res_alloc->res_reserved;
322 guaranteed = res_alloc->guaranteed[slave]; 323 guaranteed = res_alloc->guaranteed[slave];
323 324
324 if (allocated + count > res_alloc->quota[slave]) 325 if (allocated + count > res_alloc->quota[slave]) {
326 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
327 slave, port, resource_str(res_type), count,
328 allocated, res_alloc->quota[slave]);
325 goto out; 329 goto out;
330 }
326 331
327 if (allocated + count <= guaranteed) { 332 if (allocated + count <= guaranteed) {
328 err = 0; 333 err = 0;
334 from_rsvd = count;
329 } else { 335 } else {
330 /* portion may need to be obtained from free area */ 336 /* portion may need to be obtained from free area */
331 if (guaranteed - allocated > 0) 337 if (guaranteed - allocated > 0)
@@ -333,8 +339,14 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
333 else 339 else
334 from_free = count; 340 from_free = count;
335 341
336 if (free - from_free > reserved) 342 from_rsvd = count - from_free;
343
344 if (free - from_free >= reserved)
337 err = 0; 345 err = 0;
346 else
347 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
348 slave, port, resource_str(res_type), free,
349 from_free, reserved);
338 } 350 }
339 351
340 if (!err) { 352 if (!err) {
@@ -342,9 +354,11 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
342 if (port > 0) { 354 if (port > 0) {
343 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; 355 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
344 res_alloc->res_port_free[port - 1] -= count; 356 res_alloc->res_port_free[port - 1] -= count;
357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
345 } else { 358 } else {
346 res_alloc->allocated[slave] += count; 359 res_alloc->allocated[slave] += count;
347 res_alloc->res_free -= count; 360 res_alloc->res_free -= count;
361 res_alloc->res_reserved -= from_rsvd;
348 } 362 }
349 } 363 }
350 364
@@ -360,17 +374,36 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
360 struct mlx4_priv *priv = mlx4_priv(dev); 374 struct mlx4_priv *priv = mlx4_priv(dev);
361 struct resource_allocator *res_alloc = 375 struct resource_allocator *res_alloc =
362 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 376 &priv->mfunc.master.res_tracker.res_alloc[res_type];
377 int allocated, guaranteed, from_rsvd;
363 378
364 if (slave > dev->num_vfs) 379 if (slave > dev->num_vfs)
365 return; 380 return;
366 381
367 spin_lock(&res_alloc->alloc_lock); 382 spin_lock(&res_alloc->alloc_lock);
383
384 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
386 res_alloc->allocated[slave];
387 guaranteed = res_alloc->guaranteed[slave];
388
389 if (allocated - count >= guaranteed) {
390 from_rsvd = 0;
391 } else {
392 /* portion may need to be returned to reserved area */
393 if (allocated - guaranteed > 0)
394 from_rsvd = count - (allocated - guaranteed);
395 else
396 from_rsvd = count;
397 }
398
368 if (port > 0) { 399 if (port > 0) {
369 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; 400 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
370 res_alloc->res_port_free[port - 1] += count; 401 res_alloc->res_port_free[port - 1] += count;
402 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
371 } else { 403 } else {
372 res_alloc->allocated[slave] -= count; 404 res_alloc->allocated[slave] -= count;
373 res_alloc->res_free += count; 405 res_alloc->res_free += count;
406 res_alloc->res_reserved += from_rsvd;
374 } 407 }
375 408
376 spin_unlock(&res_alloc->alloc_lock); 409 spin_unlock(&res_alloc->alloc_lock);
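The two resource_tracker.c hunks above add from_rsvd bookkeeping: each grant (and each release) is split between the slave's guaranteed allotment and the shared free pool, so res_reserved/res_port_rsvd now stay in step with res_free instead of drifting. A compilable userspace model of the grant-side arithmetic; struct pool and grant() are illustrative stand-ins, not driver code:

#include <stdio.h>

struct pool { int free, reserved, allocated, guaranteed; };

static int grant(struct pool *p, int count)
{
	int from_free, from_rsvd;

	if (p->allocated + count <= p->guaranteed) {
		from_rsvd = count;	/* fully covered by the guarantee */
	} else {
		from_free = (p->guaranteed > p->allocated)
			? count - (p->guaranteed - p->allocated) : count;
		from_rsvd = count - from_free;
		if (p->free - from_free < p->reserved)
			return -1;	/* would eat into other slaves' guarantees */
	}
	p->allocated += count;
	p->free -= count;		/* mirrors res_free / res_port_free */
	p->reserved -= from_rsvd;	/* mirrors res_reserved / res_port_rsvd */
	return 0;
}

int main(void)
{
	/* 4 units requested: 2 still fit under the guarantee, 2 come from free */
	struct pool p = { .free = 10, .reserved = 4, .allocated = 1, .guaranteed = 3 };
	int rc = grant(&p, 4);

	printf("rc=%d free=%d reserved=%d\n", rc, p.free, p.reserved);
	/* -> rc=0 free=6 reserved=2 */
	return 0;
}

mlx4_release_resource() runs the same split in reverse: units above the guarantee return to the free pool, and the remainder replenishes the reservation via from_rsvd.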
@@ -963,7 +996,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
963 ret = alloc_srq_tr(id); 996 ret = alloc_srq_tr(id);
964 break; 997 break;
965 case RES_MAC: 998 case RES_MAC:
966 printk(KERN_ERR "implementation missing\n"); 999 pr_err("implementation missing\n");
967 return NULL; 1000 return NULL;
968 case RES_COUNTER: 1001 case RES_COUNTER:
969 ret = alloc_counter_tr(id); 1002 ret = alloc_counter_tr(id);
@@ -1057,10 +1090,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
1057 { 1090 {
1058 if (res->com.state == RES_MTT_BUSY || 1091 if (res->com.state == RES_MTT_BUSY ||
1059 atomic_read(&res->ref_count)) { 1092 atomic_read(&res->ref_count)) {
1060 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", 1093 pr_devel("%s-%d: state %s, ref_count %d\n",
1061 __func__, __LINE__, 1094 __func__, __LINE__,
1062 mtt_states_str(res->com.state), 1095 mtt_states_str(res->com.state),
1063 atomic_read(&res->ref_count)); 1096 atomic_read(&res->ref_count));
1064 return -EBUSY; 1097 return -EBUSY;
1065 } else if (res->com.state != RES_MTT_ALLOCATED) 1098 } else if (res->com.state != RES_MTT_ALLOCATED)
1066 return -EPERM; 1099 return -EPERM;
@@ -3897,7 +3930,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
3897 } 3930 }
3898 } 3931 }
3899 if (!be_mac) { 3932 if (!be_mac) {
3900 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", 3933 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
3901 port); 3934 port);
3902 return -EINVAL; 3935 return -EINVAL;
3903 } 3936 }
@@ -3994,7 +4027,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3994 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 4027 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3995 err = get_res(dev, slave, qpn, RES_QP, &rqp); 4028 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3996 if (err) { 4029 if (err) {
3997 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); 4030 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
3998 return err; 4031 return err;
3999 } 4032 }
4000 rule_header = (struct _rule_hw *)(ctrl + 1); 4033 rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -4012,7 +4045,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4012 case MLX4_NET_TRANS_RULE_ID_IPV4: 4045 case MLX4_NET_TRANS_RULE_ID_IPV4:
4013 case MLX4_NET_TRANS_RULE_ID_TCP: 4046 case MLX4_NET_TRANS_RULE_ID_TCP:
4014 case MLX4_NET_TRANS_RULE_ID_UDP: 4047 case MLX4_NET_TRANS_RULE_ID_UDP:
4015 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); 4048 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4016 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { 4049 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4017 err = -EINVAL; 4050 err = -EINVAL;
4018 goto err_put; 4051 goto err_put;
@@ -4021,7 +4054,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4021 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; 4054 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4022 break; 4055 break;
4023 default: 4056 default:
4024 pr_err("Corrupted mailbox.\n"); 4057 pr_err("Corrupted mailbox\n");
4025 err = -EINVAL; 4058 err = -EINVAL;
4026 goto err_put; 4059 goto err_put;
4027 } 4060 }
@@ -4035,7 +4068,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4035 4068
4036 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); 4069 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4037 if (err) { 4070 if (err) {
4038 mlx4_err(dev, "Fail to add flow steering resources.\n "); 4071 mlx4_err(dev, "Fail to add flow steering resources\n");
4039 /* detach rule*/ 4072 /* detach rule*/
4040 mlx4_cmd(dev, vhcr->out_param, 0, 0, 4073 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4041 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 4074 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -4073,7 +4106,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4073 4106
4074 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); 4107 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4075 if (err) { 4108 if (err) {
4076 mlx4_err(dev, "Fail to remove flow steering resources.\n "); 4109 mlx4_err(dev, "Fail to remove flow steering resources\n");
4077 goto out; 4110 goto out;
4078 } 4111 }
4079 4112
@@ -4151,7 +4184,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
4151 if (print) 4184 if (print)
4152 mlx4_dbg(dev, 4185 mlx4_dbg(dev,
4153 "%s id 0x%llx is busy\n", 4186 "%s id 0x%llx is busy\n",
4154 ResourceType(type), 4187 resource_str(type),
4155 r->res_id); 4188 r->res_id);
4156 ++busy; 4189 ++busy;
4157 } else { 4190 } else {
@@ -4202,8 +4235,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4202 4235
4203 err = move_all_busy(dev, slave, RES_QP); 4236 err = move_all_busy(dev, slave, RES_QP);
4204 if (err) 4237 if (err)
4205 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" 4238 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4206 "for slave %d\n", slave); 4239 slave);
4207 4240
4208 spin_lock_irq(mlx4_tlock(dev)); 4241 spin_lock_irq(mlx4_tlock(dev));
4209 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { 4242 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4241,10 +4274,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4241 MLX4_CMD_TIME_CLASS_A, 4274 MLX4_CMD_TIME_CLASS_A,
4242 MLX4_CMD_NATIVE); 4275 MLX4_CMD_NATIVE);
4243 if (err) 4276 if (err)
4244 mlx4_dbg(dev, "rem_slave_qps: failed" 4277 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4245 " to move slave %d qpn %d to" 4278 slave, qp->local_qpn);
4246 " reset\n", slave,
4247 qp->local_qpn);
4248 atomic_dec(&qp->rcq->ref_count); 4279 atomic_dec(&qp->rcq->ref_count);
4249 atomic_dec(&qp->scq->ref_count); 4280 atomic_dec(&qp->scq->ref_count);
4250 atomic_dec(&qp->mtt->ref_count); 4281 atomic_dec(&qp->mtt->ref_count);
@@ -4278,8 +4309,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4278 4309
4279 err = move_all_busy(dev, slave, RES_SRQ); 4310 err = move_all_busy(dev, slave, RES_SRQ);
4280 if (err) 4311 if (err)
4281 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " 4312 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4282 "busy for slave %d\n", slave); 4313 slave);
4283 4314
4284 spin_lock_irq(mlx4_tlock(dev)); 4315 spin_lock_irq(mlx4_tlock(dev));
4285 list_for_each_entry_safe(srq, tmp, srq_list, com.list) { 4316 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4309,9 +4340,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4309 MLX4_CMD_TIME_CLASS_A, 4340 MLX4_CMD_TIME_CLASS_A,
4310 MLX4_CMD_NATIVE); 4341 MLX4_CMD_NATIVE);
4311 if (err) 4342 if (err)
4312 mlx4_dbg(dev, "rem_slave_srqs: failed" 4343 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4313 " to move slave %d srq %d to"
4314 " SW ownership\n",
4315 slave, srqn); 4344 slave, srqn);
4316 4345
4317 atomic_dec(&srq->mtt->ref_count); 4346 atomic_dec(&srq->mtt->ref_count);
@@ -4346,8 +4375,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4346 4375
4347 err = move_all_busy(dev, slave, RES_CQ); 4376 err = move_all_busy(dev, slave, RES_CQ);
4348 if (err) 4377 if (err)
4349 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " 4378 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4350 "busy for slave %d\n", slave); 4379 slave);
4351 4380
4352 spin_lock_irq(mlx4_tlock(dev)); 4381 spin_lock_irq(mlx4_tlock(dev));
4353 list_for_each_entry_safe(cq, tmp, cq_list, com.list) { 4382 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4377,9 +4406,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4377 MLX4_CMD_TIME_CLASS_A, 4406 MLX4_CMD_TIME_CLASS_A,
4378 MLX4_CMD_NATIVE); 4407 MLX4_CMD_NATIVE);
4379 if (err) 4408 if (err)
4380 mlx4_dbg(dev, "rem_slave_cqs: failed" 4409 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4381 " to move slave %d cq %d to"
4382 " SW ownership\n",
4383 slave, cqn); 4410 slave, cqn);
4384 atomic_dec(&cq->mtt->ref_count); 4411 atomic_dec(&cq->mtt->ref_count);
4385 state = RES_CQ_ALLOCATED; 4412 state = RES_CQ_ALLOCATED;
@@ -4411,8 +4438,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4411 4438
4412 err = move_all_busy(dev, slave, RES_MPT); 4439 err = move_all_busy(dev, slave, RES_MPT);
4413 if (err) 4440 if (err)
4414 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " 4441 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4415 "busy for slave %d\n", slave); 4442 slave);
4416 4443
4417 spin_lock_irq(mlx4_tlock(dev)); 4444 spin_lock_irq(mlx4_tlock(dev));
4418 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { 4445 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4447,9 +4474,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4447 MLX4_CMD_TIME_CLASS_A, 4474 MLX4_CMD_TIME_CLASS_A,
4448 MLX4_CMD_NATIVE); 4475 MLX4_CMD_NATIVE);
4449 if (err) 4476 if (err)
4450 mlx4_dbg(dev, "rem_slave_mrs: failed" 4477 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4451 " to move slave %d mpt %d to"
4452 " SW ownership\n",
4453 slave, mptn); 4478 slave, mptn);
4454 if (mpt->mtt) 4479 if (mpt->mtt)
4455 atomic_dec(&mpt->mtt->ref_count); 4480 atomic_dec(&mpt->mtt->ref_count);
@@ -4481,8 +4506,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4481 4506
4482 err = move_all_busy(dev, slave, RES_MTT); 4507 err = move_all_busy(dev, slave, RES_MTT);
4483 if (err) 4508 if (err)
4484 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " 4509 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4485 "busy for slave %d\n", slave); 4510 slave);
4486 4511
4487 spin_lock_irq(mlx4_tlock(dev)); 4512 spin_lock_irq(mlx4_tlock(dev));
4488 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { 4513 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4584,8 +4609,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4584 4609
4585 err = move_all_busy(dev, slave, RES_EQ); 4610 err = move_all_busy(dev, slave, RES_EQ);
4586 if (err) 4611 if (err)
4587 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " 4612 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4588 "busy for slave %d\n", slave); 4613 slave);
4589 4614
4590 spin_lock_irq(mlx4_tlock(dev)); 4615 spin_lock_irq(mlx4_tlock(dev));
4591 list_for_each_entry_safe(eq, tmp, eq_list, com.list) { 4616 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4617,9 +4642,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4617 MLX4_CMD_TIME_CLASS_A, 4642 MLX4_CMD_TIME_CLASS_A,
4618 MLX4_CMD_NATIVE); 4643 MLX4_CMD_NATIVE);
4619 if (err) 4644 if (err)
4620 mlx4_dbg(dev, "rem_slave_eqs: failed" 4645 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4621 " to move slave %d eqs %d to" 4646 slave, eqn);
4622 " SW ownership\n", slave, eqn);
4623 mlx4_free_cmd_mailbox(dev, mailbox); 4647 mlx4_free_cmd_mailbox(dev, mailbox);
4624 atomic_dec(&eq->mtt->ref_count); 4648 atomic_dec(&eq->mtt->ref_count);
4625 state = RES_EQ_RESERVED; 4649 state = RES_EQ_RESERVED;
@@ -4648,8 +4672,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4648 4672
4649 err = move_all_busy(dev, slave, RES_COUNTER); 4673 err = move_all_busy(dev, slave, RES_COUNTER);
4650 if (err) 4674 if (err)
4651 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " 4675 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4652 "busy for slave %d\n", slave); 4676 slave);
4653 4677
4654 spin_lock_irq(mlx4_tlock(dev)); 4678 spin_lock_irq(mlx4_tlock(dev));
4655 list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 4679 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4679,8 +4703,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4679 4703
4680 err = move_all_busy(dev, slave, RES_XRCD); 4704 err = move_all_busy(dev, slave, RES_XRCD);
4681 if (err) 4705 if (err)
4682 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " 4706 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4683 "busy for slave %d\n", slave); 4707 slave);
4684 4708
4685 spin_lock_irq(mlx4_tlock(dev)); 4709 spin_lock_irq(mlx4_tlock(dev));
4686 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { 4710 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4825,10 +4849,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4825 0, MLX4_CMD_UPDATE_QP, 4849 0, MLX4_CMD_UPDATE_QP,
4826 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 4850 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4827 if (err) { 4851 if (err) {
4828 mlx4_info(dev, "UPDATE_QP failed for slave %d, " 4852 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4829 "port %d, qpn %d (%d)\n", 4853 work->slave, port, qp->local_qpn, err);
4830 work->slave, port, qp->local_qpn,
4831 err);
4832 errors++; 4854 errors++;
4833 } 4855 }
4834 } 4856 }
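
Every mlx4 resource_tracker hunk above makes the same mechanical change: a user-visible format string that had been wrapped to fit 80 columns is rejoined on a single source line, with only the argument list wrapping. Split strings defeat grepping the source for a message seen in the log, which is why checkpatch asks for them to stay whole. A minimal sketch of the resulting style; the helper name is illustrative, not from the driver:

	#include <linux/printk.h>

	/* The whole message is one literal, so a log excerpt such as
	 * "too busy for slave" greps straight back to this line; only
	 * the argument list wraps to the continuation line.
	 */
	static void warn_busy_slave(int slave)
	{
		pr_warn("rem_slave_qps: Could not move all qps - too busy for slave %d\n",
			slave);
	}
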
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fbcd0ad..87d1b018a9c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
620 mlx5_command_str(msg_to_opcode(ent->in)), 620 mlx5_command_str(msg_to_opcode(ent->in)),
621 msg_to_opcode(ent->in)); 621 msg_to_opcode(ent->in));
622 } 622 }
623 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, 623 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
624 deliv_status_to_str(ent->status), ent->status); 624 err, deliv_status_to_str(ent->status), ent->status);
625 625
626 return err; 626 return err;
627} 627}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b286b2c..7f39ebcd6ad0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
208 */ 208 */
209 rmb(); 209 rmb();
210 210
211 mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); 211 mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
212 eq->eqn, eqe_type_str(eqe->type));
212 switch (eqe->type) { 213 switch (eqe->type) {
213 case MLX5_EVENT_TYPE_COMP: 214 case MLX5_EVENT_TYPE_COMP:
214 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; 215 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
270 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); 271 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
271 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); 272 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
272 273
273 mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); 274 mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
275 func_id, npages);
274 mlx5_core_req_pages_handler(dev, func_id, npages); 276 mlx5_core_req_pages_handler(dev, func_id, npages);
275 } 277 }
276 break; 278 break;
277 279
278 280
279 default: 281 default:
280 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); 282 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
283 eqe->type, eq->eqn);
281 break; 284 break;
282 } 285 }
283 286
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f70051..ee24f132e319 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
66 66
67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
68 if (err) { 68 if (err) {
69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
71 if (err) { 71 if (err) {
72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
73 return err; 73 return err;
74 } 74 }
75 } 75 }
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
78 if (err) { 78 if (err) {
79 dev_warn(&pdev->dev, 79 dev_warn(&pdev->dev,
80 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n"); 80 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
82 if (err) { 82 if (err) {
83 dev_err(&pdev->dev, 83 dev_err(&pdev->dev,
84 "Can't set consistent PCI DMA mask, aborting.\n"); 84 "Can't set consistent PCI DMA mask, aborting\n");
85 return err; 85 return err;
86 } 86 }
87 } 87 }
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
95 int err = 0; 95 int err = 0;
96 96
97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
98 dev_err(&pdev->dev, "Missing registers BAR, aborting.\n"); 98 dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
99 return -ENODEV; 99 return -ENODEV;
100 } 100 }
101 101
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
319 319
320 err = pci_enable_device(pdev); 320 err = pci_enable_device(pdev);
321 if (err) { 321 if (err) {
322 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); 322 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
323 goto err_dbg; 323 goto err_dbg;
324 } 324 }
325 325
326 err = request_bar(pdev); 326 err = request_bar(pdev);
327 if (err) { 327 if (err) {
328 dev_err(&pdev->dev, "error requesting BARs, aborting.\n"); 328 dev_err(&pdev->dev, "error requesting BARs, aborting\n");
329 goto err_disable; 329 goto err_disable;
330 } 330 }
331 331
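
The set_dma_caps() and probe hunks above only trim trailing periods from the messages, but the pattern around them is the standard PCI DMA bring-up: try a 64-bit streaming mask, fall back to 32-bit, treat failure of the 32-bit mask as fatal, then repeat for the coherent mask. A sketch of that pattern with the API of this era (later kernels fold the pair into dma_set_mask_and_coherent()); the function name is hypothetical:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int example_set_dma_caps(struct pci_dev *pdev)
	{
		int err;

		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_warn(&pdev->dev, "couldn't set 64-bit PCI DMA mask\n");
			err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err)
				return err;	/* not even 32-bit DMA: fatal */
		}

		/* the coherent (consistent) mask gets the same 64 -> 32 fallback */
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));

		return err;
	}
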
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1ae1b0..f0c9f9a7a361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
39 39
40extern int mlx5_core_debug_mask; 40extern int mlx5_core_debug_mask;
41 41
42#define mlx5_core_dbg(dev, format, arg...) \ 42#define mlx5_core_dbg(dev, format, ...) \
43pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 43 pr_debug("%s:%s:%d:(pid %d): " format, \
44 current->pid, ##arg) 44 (dev)->priv.name, __func__, __LINE__, current->pid, \
45 ##__VA_ARGS__)
45 46
46#define mlx5_core_dbg_mask(dev, mask, format, arg...) \ 47#define mlx5_core_dbg_mask(dev, mask, format, ...) \
47do { \ 48do { \
48 if ((mask) & mlx5_core_debug_mask) \ 49 if ((mask) & mlx5_core_debug_mask) \
49 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \ 50 mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
50 __func__, __LINE__, current->pid, ##arg); \
51} while (0) 51} while (0)
52 52
53#define mlx5_core_err(dev, format, arg...) \ 53#define mlx5_core_err(dev, format, ...) \
54pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 54 pr_err("%s:%s:%d:(pid %d): " format, \
55 current->pid, ##arg) 55 (dev)->priv.name, __func__, __LINE__, current->pid, \
56 ##__VA_ARGS__)
56 57
57#define mlx5_core_warn(dev, format, arg...) \ 58#define mlx5_core_warn(dev, format, ...) \
58pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 59 pr_warn("%s:%s:%d:(pid %d): " format, \
59 current->pid, ##arg) 60 (dev)->priv.name, __func__, __LINE__, current->pid, \
61 ##__VA_ARGS__)
60 62
61enum { 63enum {
62 MLX5_CMD_DATA, /* print command payload only */ 64 MLX5_CMD_DATA, /* print command payload only */
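
The macro hunk above does two things: it converts the GNU named-variadic form (`format, arg...` with `##arg`) to the C99 form (`format, ...` with `##__VA_ARGS__`, where the GNU `##` extension swallows the leading comma when no extra arguments are passed), and it makes the masked variant delegate to mlx5_core_dbg() instead of duplicating the prefix format. A reduced sketch with hypothetical names:

	#include <linux/printk.h>
	#include <linux/sched.h>

	extern int example_debug_mask;		/* hypothetical module knob */

	#define example_dbg(format, ...) \
		pr_debug("%s:%d:(pid %d): " format, \
			 __func__, __LINE__, current->pid, ##__VA_ARGS__)

	#define example_dbg_mask(mask, format, ...)		\
	do {							\
		if ((mask) & example_debug_mask)		\
			example_dbg(format, ##__VA_ARGS__);	\
	} while (0)
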
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ac52a0fe2d3a..ba0401d4af50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
73 } 73 }
74 74
75 if (err) { 75 if (err) {
76 mlx5_core_dbg(dev, "cmd exec faile %d\n", err); 76 mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
77 return err; 77 return err;
78 } 78 }
79 79
@@ -195,7 +195,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
195 } 195 }
196 196
197 if (out.hdr.status) { 197 if (out.hdr.status) {
198 mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status); 198 mlx5_core_err(dev, "create_psv bad status %d\n",
199 out.hdr.status);
199 return mlx5_cmd_status_to_err(&out.hdr); 200 return mlx5_cmd_status_to_err(&out.hdr);
200 } 201 }
201 202
@@ -224,7 +225,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
224 } 225 }
225 226
226 if (out.hdr.status) { 227 if (out.hdr.status) {
227 mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status); 228 mlx5_core_err(dev, "destroy_psv bad status %d\n",
229 out.hdr.status);
228 err = mlx5_cmd_status_to_err(&out.hdr); 230 err = mlx5_cmd_status_to_err(&out.hdr);
229 goto out; 231 goto out;
230 } 232 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a82bc3..c2a953ef0e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
311 in->num_entries = cpu_to_be32(npages); 311 in->num_entries = cpu_to_be32(npages);
312 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 312 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
313 if (err) { 313 if (err) {
314 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); 314 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
315 func_id, npages, err);
315 goto out_alloc; 316 goto out_alloc;
316 } 317 }
317 dev->priv.fw_pages += npages; 318 dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
319 if (out.hdr.status) { 320 if (out.hdr.status) {
320 err = mlx5_cmd_status_to_err(&out.hdr); 321 err = mlx5_cmd_status_to_err(&out.hdr);
321 if (err) { 322 if (err) {
322 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); 323 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
324 func_id, npages, out.hdr.status);
323 goto out_alloc; 325 goto out_alloc;
324 } 326 }
325 } 327 }
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
378 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 380 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
379 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 381 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
380 if (err) { 382 if (err) {
381 mlx5_core_err(dev, "failed recliaming pages\n"); 383 mlx5_core_err(dev, "failed reclaiming pages\n");
382 goto out_free; 384 goto out_free;
383 } 385 }
384 dev->priv.fw_pages -= npages; 386 dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
414 err = give_pages(dev, req->func_id, req->npages, 1); 416 err = give_pages(dev, req->func_id, req->npages, 1);
415 417
416 if (err) 418 if (err)
417 mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? 419 mlx5_core_warn(dev, "%s fail %d\n",
418 "reclaim" : "give", err); 420 req->npages < 0 ? "reclaim" : "give", err);
419 421
420 kfree(req); 422 kfree(req);
421} 423}
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
487 optimal_reclaimed_pages(), 489 optimal_reclaimed_pages(),
488 &nclaimed); 490 &nclaimed);
489 if (err) { 491 if (err) {
490 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); 492 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
493 err);
491 return err; 494 return err;
492 } 495 }
493 if (nclaimed) 496 if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 510576213dd0..8145b4668229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
79 79
80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
81 if (err) { 81 if (err) {
82 mlx5_core_warn(dev, "ret %d", err); 82 mlx5_core_warn(dev, "ret %d\n", err);
83 return err; 83 return err;
84 } 84 }
85 85
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
96 err = radix_tree_insert(&table->tree, qp->qpn, qp); 96 err = radix_tree_insert(&table->tree, qp->qpn, qp);
97 spin_unlock_irq(&table->lock); 97 spin_unlock_irq(&table->lock);
98 if (err) { 98 if (err) {
99 mlx5_core_warn(dev, "err %d", err); 99 mlx5_core_warn(dev, "err %d\n", err);
100 goto err_cmd; 100 goto err_cmd;
101 } 101 }
102 102
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 16435b3cfa9f..6c7c78baedca 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
1504 if (ksp->phyiface_regs && ksp->link_irq == -1) { 1504 if (ksp->phyiface_regs && ksp->link_irq == -1) {
1505 ks8695_init_switch(ksp); 1505 ks8695_init_switch(ksp);
1506 ksp->dtype = KS8695_DTYPE_LAN; 1506 ksp->dtype = KS8695_DTYPE_LAN;
1507 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); 1507 ndev->ethtool_ops = &ks8695_ethtool_ops;
1508 } else if (ksp->phyiface_regs && ksp->link_irq != -1) { 1508 } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
1509 ks8695_init_wan_phy(ksp); 1509 ks8695_init_wan_phy(ksp);
1510 ksp->dtype = KS8695_DTYPE_WAN; 1510 ksp->dtype = KS8695_DTYPE_WAN;
1511 SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops); 1511 ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
1512 } else { 1512 } else {
1513 /* No initialisation since HPNA does not have a PHY */ 1513 /* No initialisation since HPNA does not have a PHY */
1514 ksp->dtype = KS8695_DTYPE_HPNA; 1514 ksp->dtype = KS8695_DTYPE_HPNA;
1515 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); 1515 ndev->ethtool_ops = &ks8695_ethtool_ops;
1516 } 1516 }
1517 1517
1518 /* And bring up the net_device with the net core */ 1518 /* And bring up the net_device with the net core */
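
This is the first of many SET_ETHTOOL_OPS() conversions in this section (ks8851, ksz884x, enc28j60, myri10ge, natsemi, ns83820, s2io, vxge, forcedeth, pch_gbe, hamachi, yellowfin, netxen, qla3xxx follow). The macro expanded to nothing more than this assignment and was removed from the tree in this cycle, so drivers now set the const ops table directly. Sketch, with an illustrative ops table:

	#include <linux/netdevice.h>
	#include <linux/ethtool.h>

	static const struct ethtool_ops example_ethtool_ops = {	/* hypothetical */
		.get_link = ethtool_op_get_link,
	};

	static void example_setup(struct net_device *ndev)
	{
		/* was: SET_ETHTOOL_OPS(ndev, &example_ethtool_ops); */
		ndev->ethtool_ops = &example_ethtool_ops;
	}
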
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e0c92e0e5e1d..66d4ab703f45 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -26,6 +26,8 @@
26#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
27 27
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/gpio.h>
30#include <linux/of_gpio.h>
29 31
30#include "ks8851.h" 32#include "ks8851.h"
31 33
@@ -85,6 +87,8 @@ union ks8851_tx_hdr {
85 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom 87 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
86 * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. 88 * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
87 * @vdd_reg: Optional regulator supplying the chip 89 * @vdd_reg: Optional regulator supplying the chip
90 * @vdd_io: Optional digital power supply for IO
91 * @gpio: Optional reset_n gpio
88 * 92 *
89 * The @lock ensures that the chip is protected when certain operations are 93 * The @lock ensures that the chip is protected when certain operations are
90 * in progress. When the read or write packet transfer is in progress, most 94 * in progress. When the read or write packet transfer is in progress, most
@@ -133,6 +137,8 @@ struct ks8851_net {
133 137
134 struct eeprom_93cx6 eeprom; 138 struct eeprom_93cx6 eeprom;
135 struct regulator *vdd_reg; 139 struct regulator *vdd_reg;
140 struct regulator *vdd_io;
141 int gpio;
136}; 142};
137 143
138static int msg_enable; 144static int msg_enable;
@@ -1404,6 +1410,7 @@ static int ks8851_probe(struct spi_device *spi)
1404 struct ks8851_net *ks; 1410 struct ks8851_net *ks;
1405 int ret; 1411 int ret;
1406 unsigned cider; 1412 unsigned cider;
1413 int gpio;
1407 1414
1408 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1415 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1409 if (!ndev) 1416 if (!ndev)
@@ -1417,20 +1424,53 @@ static int ks8851_probe(struct spi_device *spi)
1417 ks->spidev = spi; 1424 ks->spidev = spi;
1418 ks->tx_space = 6144; 1425 ks->tx_space = 6144;
1419 1426
1420 ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd"); 1427 gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
1421 if (IS_ERR(ks->vdd_reg)) { 1428 0, NULL);
1422 ret = PTR_ERR(ks->vdd_reg); 1429 if (gpio == -EPROBE_DEFER) {
1423 if (ret == -EPROBE_DEFER) 1430 ret = gpio;
1424 goto err_reg; 1431 goto err_gpio;
1425 } else { 1432 }
1426 ret = regulator_enable(ks->vdd_reg); 1433
1434 ks->gpio = gpio;
1435 if (gpio_is_valid(gpio)) {
1436 ret = devm_gpio_request_one(&spi->dev, gpio,
1437 GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
1427 if (ret) { 1438 if (ret) {
1428 dev_err(&spi->dev, "regulator enable fail: %d\n", 1439 dev_err(&spi->dev, "reset gpio request failed\n");
1429 ret); 1440 goto err_gpio;
1430 goto err_reg_en;
1431 } 1441 }
1432 } 1442 }
1433 1443
1444 ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
1445 if (IS_ERR(ks->vdd_io)) {
1446 ret = PTR_ERR(ks->vdd_io);
1447 goto err_reg_io;
1448 }
1449
1450 ret = regulator_enable(ks->vdd_io);
1451 if (ret) {
1452 dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
1453 ret);
1454 goto err_reg_io;
1455 }
1456
1457 ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
1458 if (IS_ERR(ks->vdd_reg)) {
1459 ret = PTR_ERR(ks->vdd_reg);
1460 goto err_reg;
1461 }
1462
1463 ret = regulator_enable(ks->vdd_reg);
1464 if (ret) {
1465 dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
1466 ret);
1467 goto err_reg;
1468 }
1469
1470 if (gpio_is_valid(gpio)) {
1471 usleep_range(10000, 11000);
1472 gpio_set_value(gpio, 1);
1473 }
1434 1474
1435 mutex_init(&ks->lock); 1475 mutex_init(&ks->lock);
1436 spin_lock_init(&ks->statelock); 1476 spin_lock_init(&ks->statelock);
@@ -1471,7 +1511,7 @@ static int ks8851_probe(struct spi_device *spi)
1471 1511
1472 skb_queue_head_init(&ks->txq); 1512 skb_queue_head_init(&ks->txq);
1473 1513
1474 SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops); 1514 ndev->ethtool_ops = &ks8851_ethtool_ops;
1475 SET_NETDEV_DEV(ndev, &spi->dev); 1515 SET_NETDEV_DEV(ndev, &spi->dev);
1476 1516
1477 spi_set_drvdata(spi, ks); 1517 spi_set_drvdata(spi, ks);
@@ -1527,13 +1567,14 @@ err_netdev:
1527 free_irq(ndev->irq, ks); 1567 free_irq(ndev->irq, ks);
1528 1568
1529err_irq: 1569err_irq:
1570 if (gpio_is_valid(gpio))
1571 gpio_set_value(gpio, 0);
1530err_id: 1572err_id:
1531 if (!IS_ERR(ks->vdd_reg)) 1573 regulator_disable(ks->vdd_reg);
1532 regulator_disable(ks->vdd_reg);
1533err_reg_en:
1534 if (!IS_ERR(ks->vdd_reg))
1535 regulator_put(ks->vdd_reg);
1536err_reg: 1574err_reg:
1575 regulator_disable(ks->vdd_io);
1576err_reg_io:
1577err_gpio:
1537 free_netdev(ndev); 1578 free_netdev(ndev);
1538 return ret; 1579 return ret;
1539} 1580}
@@ -1547,18 +1588,24 @@ static int ks8851_remove(struct spi_device *spi)
1547 1588
1548 unregister_netdev(priv->netdev); 1589 unregister_netdev(priv->netdev);
1549 free_irq(spi->irq, priv); 1590 free_irq(spi->irq, priv);
1550 if (!IS_ERR(priv->vdd_reg)) { 1591 if (gpio_is_valid(priv->gpio))
1551 regulator_disable(priv->vdd_reg); 1592 gpio_set_value(priv->gpio, 0);
1552 regulator_put(priv->vdd_reg); 1593 regulator_disable(priv->vdd_reg);
1553 } 1594 regulator_disable(priv->vdd_io);
1554 free_netdev(priv->netdev); 1595 free_netdev(priv->netdev);
1555 1596
1556 return 0; 1597 return 0;
1557} 1598}
1558 1599
1600static const struct of_device_id ks8851_match_table[] = {
1601 { .compatible = "micrel,ks8851" },
1602 { }
1603};
1604
1559static struct spi_driver ks8851_driver = { 1605static struct spi_driver ks8851_driver = {
1560 .driver = { 1606 .driver = {
1561 .name = "ks8851", 1607 .name = "ks8851",
1608 .of_match_table = ks8851_match_table,
1562 .owner = THIS_MODULE, 1609 .owner = THIS_MODULE,
1563 .pm = &ks8851_pm_ops, 1610 .pm = &ks8851_pm_ops,
1564 }, 1611 },
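
The reworked ks8851_probe() above encodes a power-up sequence: look up the optional reset-gpios property (propagating -EPROBE_DEFER if the GPIO provider has not probed yet), claim the line driven low to hold the chip in reset, enable vdd-io and then vdd, wait roughly 10 ms, and only then release reset. Switching to devm_regulator_get() and devm_gpio_request_one() is what lets the error paths drop the manual regulator_put() calls. A condensed sketch of the ordering only, keeping the same DT property names but abbreviating error handling; the helper name is hypothetical:

	#include <linux/spi/spi.h>
	#include <linux/gpio.h>
	#include <linux/of_gpio.h>
	#include <linux/regulator/consumer.h>
	#include <linux/delay.h>
	#include <linux/err.h>

	static int example_power_up(struct spi_device *spi)
	{
		struct regulator *vdd_io, *vdd;
		int gpio, ret;

		gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
					       0, NULL);
		if (gpio == -EPROBE_DEFER)
			return gpio;		/* GPIO controller not ready yet */

		if (gpio_is_valid(gpio)) {	/* hold the chip in reset */
			ret = devm_gpio_request_one(&spi->dev, gpio,
						    GPIOF_OUT_INIT_LOW, "rst_n");
			if (ret)
				return ret;
		}

		vdd_io = devm_regulator_get(&spi->dev, "vdd-io");	/* IO rail first */
		if (IS_ERR(vdd_io) || regulator_enable(vdd_io))
			return -ENODEV;
		vdd = devm_regulator_get(&spi->dev, "vdd");		/* then core rail */
		if (IS_ERR(vdd) || regulator_enable(vdd))
			return -ENODEV;

		if (gpio_is_valid(gpio)) {
			usleep_range(10000, 11000);	/* let supplies settle */
			gpio_set_value(gpio, 1);	/* release reset */
		}
		return 0;
	}
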
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 14ac0e2bc09f..064a48d0c368 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
4930 * Only reset the hardware if time between calls is long 4930 * Only reset the hardware if time between calls is long
4931 * enough. 4931 * enough.
4932 */ 4932 */
4933 if (jiffies - last_reset <= dev->watchdog_timeo) 4933 if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
4934 hw_priv = NULL; 4934 hw_priv = NULL;
4935 } 4935 }
4936 4936
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
7072 dev = alloc_etherdev(sizeof(struct dev_priv)); 7072 dev = alloc_etherdev(sizeof(struct dev_priv));
7073 if (!dev) 7073 if (!dev)
7074 goto pcidev_init_reg_err; 7074 goto pcidev_init_reg_err;
7075 SET_NETDEV_DEV(dev, &pdev->dev);
7075 info->netdev[i] = dev; 7076 info->netdev[i] = dev;
7076 7077
7077 priv = netdev_priv(dev); 7078 priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
7106 } 7107 }
7107 7108
7108 dev->netdev_ops = &netdev_ops; 7109 dev->netdev_ops = &netdev_ops;
7109 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7110 dev->ethtool_ops = &netdev_ethtool_ops;
7110 if (register_netdev(dev)) 7111 if (register_netdev(dev))
7111 goto pcidev_init_reg_err; 7112 goto pcidev_init_reg_err;
7112 port_set_power_saving(port, true); 7113 port_set_power_saving(port, true);
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index c7b40aa21f22..b1b5f66b8b69 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
1593 dev->irq = spi->irq; 1593 dev->irq = spi->irq;
1594 dev->netdev_ops = &enc28j60_netdev_ops; 1594 dev->netdev_ops = &enc28j60_netdev_ops;
1595 dev->watchdog_timeo = TX_TIMEOUT; 1595 dev->watchdog_timeo = TX_TIMEOUT;
1596 SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops); 1596 dev->ethtool_ops = &enc28j60_ethtool_ops;
1597 1597
1598 enc28j60_lowpower(priv, true); 1598 enc28j60_lowpower(priv, true);
1599 1599
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 130f6b204efa..f3d5d79f1cd1 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4112 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, 4112 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
4113 (unsigned long)mgp); 4113 (unsigned long)mgp);
4114 4114
4115 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); 4115 netdev->ethtool_ops = &myri10ge_ethtool_ops;
4116 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); 4116 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
4117 status = register_netdev(netdev); 4117 status = register_netdev(netdev);
4118 if (status != 0) { 4118 if (status != 0) {
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 64ec2a437f46..291fba8b9f07 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
927 dev->netdev_ops = &natsemi_netdev_ops; 927 dev->netdev_ops = &natsemi_netdev_ops;
928 dev->watchdog_timeo = TX_TIMEOUT; 928 dev->watchdog_timeo = TX_TIMEOUT;
929 929
930 SET_ETHTOOL_OPS(dev, &ethtool_ops); 930 dev->ethtool_ops = &ethtool_ops;
931 931
932 if (mtu) 932 if (mtu)
933 dev->mtu = mtu; 933 dev->mtu = mtu;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index dbccf1de49ec..19bb8244b9e3 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
2030 pci_dev->subsystem_vendor, pci_dev->subsystem_device); 2030 pci_dev->subsystem_vendor, pci_dev->subsystem_device);
2031 2031
2032 ndev->netdev_ops = &netdev_ops; 2032 ndev->netdev_ops = &netdev_ops;
2033 SET_ETHTOOL_OPS(ndev, &ops); 2033 ndev->ethtool_ops = &ops;
2034 ndev->watchdog_timeo = 5 * HZ; 2034 ndev->watchdog_timeo = 5 * HZ;
2035 pci_set_drvdata(pci_dev, ndev); 2035 pci_set_drvdata(pci_dev, ndev);
2036 2036
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a2844ff322c4..be587647c706 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
534 netif_tx_start_all_queues(sp->dev); 534 netif_tx_start_all_queues(sp->dev);
535} 535}
536 536
537static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
538{
539 if (!sp->config.multiq)
540 sp->mac_control.fifos[fifo_no].queue_state =
541 FIFO_QUEUE_START;
542
543 netif_tx_start_all_queues(sp->dev);
544}
545
546static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp) 537static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
547{ 538{
548 if (!sp->config.multiq) { 539 if (!sp->config.multiq) {
@@ -5369,8 +5360,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5369 ethtool_cmd_speed_set(info, SPEED_10000); 5360 ethtool_cmd_speed_set(info, SPEED_10000);
5370 info->duplex = DUPLEX_FULL; 5361 info->duplex = DUPLEX_FULL;
5371 } else { 5362 } else {
5372 ethtool_cmd_speed_set(info, -1); 5363 ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
5373 info->duplex = -1; 5364 info->duplex = DUPLEX_UNKNOWN;
5374 } 5365 }
5375 5366
5376 info->autoneg = AUTONEG_DISABLE; 5367 info->autoneg = AUTONEG_DISABLE;
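
Returning -1 for unknown speed and duplex relied on the fields saturating to all-ones; uapi ethtool.h has provided SPEED_UNKNOWN (-1 as a u32) and DUPLEX_UNKNOWN (0xff) since v3.2, so the reported values do not change here, only the intent becomes explicit. Sketch with a hypothetical helper:

	#include <linux/ethtool.h>

	static void example_report_no_link(struct ethtool_cmd *ecmd)
	{
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}
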
@@ -7919,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7919 7910
7920 /* Driver entry points */ 7911 /* Driver entry points */
7921 dev->netdev_ops = &s2io_netdev_ops; 7912 dev->netdev_ops = &s2io_netdev_ops;
7922 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7913 dev->ethtool_ops = &netdev_ethtool_ops;
7923 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 7914 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7924 NETIF_F_TSO | NETIF_F_TSO6 | 7915 NETIF_F_TSO | NETIF_F_TSO6 |
7925 NETIF_F_RXCSUM | NETIF_F_LRO; 7916 NETIF_F_RXCSUM | NETIF_F_LRO;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 089b713b9f7b..2bbd01fcb9b0 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -120,7 +120,6 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120{ 120{
121 u64 val64; 121 u64 val64;
122 u32 i = 0; 122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
124 123
125 udelay(10); 124 udelay(10);
126 125
@@ -139,7 +138,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
139 mdelay(1); 138 mdelay(1);
140 } while (++i <= max_millis); 139 } while (++i <= max_millis);
141 140
142 return ret; 141 return VXGE_HW_FAIL;
143} 142}
144 143
145static inline enum vxge_hw_status 144static inline enum vxge_hw_status
@@ -1682,12 +1681,10 @@ enum vxge_hw_status vxge_hw_driver_stats_get(
1682 struct __vxge_hw_device *hldev, 1681 struct __vxge_hw_device *hldev,
1683 struct vxge_hw_device_stats_sw_info *sw_stats) 1682 struct vxge_hw_device_stats_sw_info *sw_stats)
1684{ 1683{
1685 enum vxge_hw_status status = VXGE_HW_OK;
1686
1687 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats, 1684 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1688 sizeof(struct vxge_hw_device_stats_sw_info)); 1685 sizeof(struct vxge_hw_device_stats_sw_info));
1689 1686
1690 return status; 1687 return VXGE_HW_OK;
1691} 1688}
1692 1689
1693/* 1690/*
@@ -3228,7 +3225,6 @@ enum vxge_hw_status
3228vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask) 3225vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3229{ 3226{
3230 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 3227 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
3231 enum vxge_hw_status status = VXGE_HW_OK;
3232 int i = 0, j = 0; 3228 int i = 0, j = 0;
3233 3229
3234 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3230 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -3241,7 +3237,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3241 return VXGE_HW_FAIL; 3237 return VXGE_HW_FAIL;
3242 } 3238 }
3243 } 3239 }
3244 return status; 3240 return VXGE_HW_OK;
3245} 3241}
3246/* 3242/*
3247 * vxge_hw_mgmt_reg_Write - Write Titan register. 3243 * vxge_hw_mgmt_reg_Write - Write Titan register.
@@ -3979,7 +3975,6 @@ __vxge_hw_vpath_mgmt_read(
3979{ 3975{
3980 u32 i, mtu = 0, max_pyld = 0; 3976 u32 i, mtu = 0, max_pyld = 0;
3981 u64 val64; 3977 u64 val64;
3982 enum vxge_hw_status status = VXGE_HW_OK;
3983 3978
3984 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { 3979 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3985 3980
@@ -4009,7 +4004,7 @@ __vxge_hw_vpath_mgmt_read(
4009 else 4004 else
4010 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); 4005 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4011 4006
4012 return status; 4007 return VXGE_HW_OK;
4013} 4008}
4014 4009
4015/* 4010/*
@@ -4039,14 +4034,13 @@ static enum vxge_hw_status
4039__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) 4034__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4040{ 4035{
4041 u64 val64; 4036 u64 val64;
4042 enum vxge_hw_status status = VXGE_HW_OK;
4043 4037
4044 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); 4038 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
4045 4039
4046 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 4040 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4047 &hldev->common_reg->cmn_rsthdlr_cfg0); 4041 &hldev->common_reg->cmn_rsthdlr_cfg0);
4048 4042
4049 return status; 4043 return VXGE_HW_OK;
4050} 4044}
4051 4045
4052/* 4046/*
@@ -4227,7 +4221,6 @@ static enum vxge_hw_status
4227__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) 4221__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4228{ 4222{
4229 u64 val64; 4223 u64 val64;
4230 enum vxge_hw_status status = VXGE_HW_OK;
4231 struct __vxge_hw_virtualpath *vpath; 4224 struct __vxge_hw_virtualpath *vpath;
4232 struct vxge_hw_vp_config *vp_config; 4225 struct vxge_hw_vp_config *vp_config;
4233 struct vxge_hw_vpath_reg __iomem *vp_reg; 4226 struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4283,7 +4276,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4283 4276
4284 writeq(val64, &vp_reg->rxmac_vcfg1); 4277 writeq(val64, &vp_reg->rxmac_vcfg1);
4285 } 4278 }
4286 return status; 4279 return VXGE_HW_OK;
4287} 4280}
4288 4281
4289/* 4282/*
@@ -4295,7 +4288,6 @@ static enum vxge_hw_status
4295__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) 4288__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4296{ 4289{
4297 u64 val64; 4290 u64 val64;
4298 enum vxge_hw_status status = VXGE_HW_OK;
4299 struct __vxge_hw_virtualpath *vpath; 4291 struct __vxge_hw_virtualpath *vpath;
4300 struct vxge_hw_vpath_reg __iomem *vp_reg; 4292 struct vxge_hw_vpath_reg __iomem *vp_reg;
4301 struct vxge_hw_vp_config *config; 4293 struct vxge_hw_vp_config *config;
@@ -4545,7 +4537,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4545 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); 4537 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4546 writeq(val64, &vp_reg->tim_wrkld_clc); 4538 writeq(val64, &vp_reg->tim_wrkld_clc);
4547 4539
4548 return status; 4540 return VXGE_HW_OK;
4549} 4541}
4550 4542
4551/* 4543/*
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index f8f073880f84..b07d552a27d4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -62,8 +62,8 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
62 ethtool_cmd_speed_set(info, SPEED_10000); 62 ethtool_cmd_speed_set(info, SPEED_10000);
63 info->duplex = DUPLEX_FULL; 63 info->duplex = DUPLEX_FULL;
64 } else { 64 } else {
65 ethtool_cmd_speed_set(info, -1); 65 ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
66 info->duplex = -1; 66 info->duplex = DUPLEX_UNKNOWN;
67 } 67 }
68 68
69 info->autoneg = AUTONEG_DISABLE; 69 info->autoneg = AUTONEG_DISABLE;
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1128 1128
1129void vxge_initialize_ethtool_ops(struct net_device *ndev) 1129void vxge_initialize_ethtool_ops(struct net_device *ndev)
1130{ 1130{
1131 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); 1131 ndev->ethtool_ops = &vxge_ethtool_ops;
1132} 1132}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d107bcbb8543..7a0deadd53bf 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2122static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) 2122static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2123{ 2123{
2124 fifo->interrupt_count++; 2124 fifo->interrupt_count++;
2125 if (jiffies > fifo->jiffies + HZ / 100) { 2125 if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
2126 struct __vxge_hw_fifo *hw_fifo = fifo->handle; 2126 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2127 2127
2128 fifo->jiffies = jiffies; 2128 fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2150static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) 2150static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2151{ 2151{
2152 ring->interrupt_count++; 2152 ring->interrupt_count++;
2153 if (jiffies > ring->jiffies + HZ / 100) { 2153 if (time_before(ring->jiffies + HZ / 100, jiffies)) {
2154 struct __vxge_hw_ring *hw_ring = ring->handle; 2154 struct __vxge_hw_ring *hw_ring = ring->handle;
2155 2155
2156 ring->jiffies = jiffies; 2156 ring->jiffies = jiffies;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index fddb464aeab3..9afc536c5734 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -406,7 +406,7 @@ union ring_type {
406 406
407#define NV_RX_DESCRIPTORVALID (1<<16) 407#define NV_RX_DESCRIPTORVALID (1<<16)
408#define NV_RX_MISSEDFRAME (1<<17) 408#define NV_RX_MISSEDFRAME (1<<17)
409#define NV_RX_SUBSTRACT1 (1<<18) 409#define NV_RX_SUBTRACT1 (1<<18)
410#define NV_RX_ERROR1 (1<<23) 410#define NV_RX_ERROR1 (1<<23)
411#define NV_RX_ERROR2 (1<<24) 411#define NV_RX_ERROR2 (1<<24)
412#define NV_RX_ERROR3 (1<<25) 412#define NV_RX_ERROR3 (1<<25)
@@ -423,7 +423,7 @@ union ring_type {
423#define NV_RX2_CHECKSUM_IP_TCP (0x14000000) 423#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
424#define NV_RX2_CHECKSUM_IP_UDP (0x18000000) 424#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
425#define NV_RX2_DESCRIPTORVALID (1<<29) 425#define NV_RX2_DESCRIPTORVALID (1<<29)
426#define NV_RX2_SUBSTRACT1 (1<<25) 426#define NV_RX2_SUBTRACT1 (1<<25)
427#define NV_RX2_ERROR1 (1<<18) 427#define NV_RX2_ERROR1 (1<<18)
428#define NV_RX2_ERROR2 (1<<19) 428#define NV_RX2_ERROR2 (1<<19)
429#define NV_RX2_ERROR3 (1<<20) 429#define NV_RX2_ERROR3 (1<<20)
@@ -2832,7 +2832,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2832 } 2832 }
2833 /* framing errors are soft errors */ 2833 /* framing errors are soft errors */
2834 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2834 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2835 if (flags & NV_RX_SUBSTRACT1) 2835 if (flags & NV_RX_SUBTRACT1)
2836 len--; 2836 len--;
2837 } 2837 }
2838 /* the rest are hard errors */ 2838 /* the rest are hard errors */
@@ -2863,7 +2863,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2863 } 2863 }
2864 /* framing errors are soft errors */ 2864 /* framing errors are soft errors */
2865 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2865 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2866 if (flags & NV_RX2_SUBSTRACT1) 2866 if (flags & NV_RX2_SUBTRACT1)
2867 len--; 2867 len--;
2868 } 2868 }
2869 /* the rest are hard errors */ 2869 /* the rest are hard errors */
@@ -2937,7 +2937,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2937 } 2937 }
2938 /* framing errors are soft errors */ 2938 /* framing errors are soft errors */
2939 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2939 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2940 if (flags & NV_RX2_SUBSTRACT1) 2940 if (flags & NV_RX2_SUBTRACT1)
2941 len--; 2941 len--;
2942 } 2942 }
2943 /* the rest are hard errors */ 2943 /* the rest are hard errors */
@@ -4285,8 +4285,8 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4285 if (np->duplex) 4285 if (np->duplex)
4286 ecmd->duplex = DUPLEX_FULL; 4286 ecmd->duplex = DUPLEX_FULL;
4287 } else { 4287 } else {
4288 speed = -1; 4288 speed = SPEED_UNKNOWN;
4289 ecmd->duplex = -1; 4289 ecmd->duplex = DUPLEX_UNKNOWN;
4290 } 4290 }
4291 ethtool_cmd_speed_set(ecmd, speed); 4291 ethtool_cmd_speed_set(ecmd, speed);
4292 ecmd->autoneg = np->autoneg; 4292 ecmd->autoneg = np->autoneg;
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5766 dev->netdev_ops = &nv_netdev_ops_optimized; 5766 dev->netdev_ops = &nv_netdev_ops_optimized;
5767 5767
5768 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5768 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5769 SET_ETHTOOL_OPS(dev, &ops); 5769 dev->ethtool_ops = &ops;
5770 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5770 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5771 5771
5772 pci_set_drvdata(pci_dev, dev); 5772 pci_set_drvdata(pci_dev, dev);
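
The forcedeth hunks above fix the SUBSTRACT1 misspelling. Note that the #define and every use site change in the same patch, so the tree still compiles at each point in the series, which keeps it bisectable. The renamed flags and one of their use sites, as the diff leaves them:

	#define NV_RX_SUBTRACT1		(1 << 18)	/* was NV_RX_SUBSTRACT1 */
	#define NV_RX2_SUBTRACT1	(1 << 25)	/* was NV_RX2_SUBSTRACT1 */

	/* framing errors are soft errors */
	if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
		if (flags & NV_RX_SUBTRACT1)
			len--;
	}
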
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 422d9b51ac24..8706c0dbd0c3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1361,7 +1361,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1361 __lpc_eth_clock_enable(pldat, true); 1361 __lpc_eth_clock_enable(pldat, true);
1362 1362
1363 /* Map IO space */ 1363 /* Map IO space */
1364 pldat->net_base = ioremap(res->start, res->end - res->start + 1); 1364 pldat->net_base = ioremap(res->start, resource_size(res));
1365 if (!pldat->net_base) { 1365 if (!pldat->net_base) {
1366 dev_err(&pdev->dev, "failed to map registers\n"); 1366 dev_err(&pdev->dev, "failed to map registers\n");
1367 ret = -ENOMEM; 1367 ret = -ENOMEM;
@@ -1417,10 +1417,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1417 } 1417 }
1418 pldat->dma_buff_base_p = dma_handle; 1418 pldat->dma_buff_base_p = dma_handle;
1419 1419
1420 netdev_dbg(ndev, "IO address start :0x%08x\n", 1420 netdev_dbg(ndev, "IO address space :%pR\n", res);
1421 res->start); 1421 netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
1422 netdev_dbg(ndev, "IO address size :%d\n",
1423 res->end - res->start + 1);
1424 netdev_dbg(ndev, "IO address (mapped) :0x%p\n", 1422 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
1425 pldat->net_base); 1423 pldat->net_base);
1426 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); 1424 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
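
Two helpers replace the open-coded arithmetic above: resource_size() computes `end - start + 1` (the +1 being the classic off-by-one trap), and the %pR printk extension prints a struct resource's range and flags in one specifier. Sketch, hypothetical helper:

	#include <linux/ioport.h>
	#include <linux/io.h>
	#include <linux/device.h>

	static void __iomem *example_map(struct device *dev, struct resource *res)
	{
		void __iomem *base = ioremap(res->start, resource_size(res));

		if (base)
			dev_dbg(dev, "registers %pR mapped at %p\n", res, base);
		return base;
	}
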
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index a588ffde9700..44c8be1c6805 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
4 4
5config PCH_GBE 5config PCH_GBE
6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" 6 tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
7 depends on PCI && (X86 || COMPILE_TEST) 7 depends on PCI && (X86_32 || COMPILE_TEST)
8 select MII 8 select MII
9 select PTP_1588_CLOCK_PCH 9 select PTP_1588_CLOCK_PCH
10 ---help--- 10 ---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 826f0ccdc23c..4fe8ea96bd25 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -91,7 +91,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
91 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); 91 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
92 92
93 if (!netif_carrier_ok(adapter->netdev)) 93 if (!netif_carrier_ok(adapter->netdev))
94 ethtool_cmd_speed_set(ecmd, -1); 94 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
95 return ret; 95 return ret;
96} 96}
97 97
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
508 508
509void pch_gbe_set_ethtool_ops(struct net_device *netdev) 509void pch_gbe_set_ethtool_ops(struct net_device *netdev)
510{ 510{
511 SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops); 511 netdev->ethtool_ops = &pch_gbe_ethtool_ops;
512} 512}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index b6bdeb3c1971..9a997e4c3e08 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
724 724
725 /* The Hamachi-specific entries in the device structure. */ 725 /* The Hamachi-specific entries in the device structure. */
726 dev->netdev_ops = &hamachi_netdev_ops; 726 dev->netdev_ops = &hamachi_netdev_ops;
727 if (chip_tbl[hmp->chip_id].flags & CanHaveMII) 727 dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
728 SET_ETHTOOL_OPS(dev, &ethtool_ops); 728 &ethtool_ops : &ethtool_ops_no_mii;
729 else
730 SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
731 dev->watchdog_timeo = TX_TIMEOUT; 729 dev->watchdog_timeo = TX_TIMEOUT;
732 if (mtu) 730 if (mtu)
733 dev->mtu = mtu; 731 dev->mtu = mtu;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 9a6cb482dcd0..69a8dc095072 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
472 472
473 /* The Yellowfin-specific entries in the device structure. */ 473 /* The Yellowfin-specific entries in the device structure. */
474 dev->netdev_ops = &netdev_ops; 474 dev->netdev_ops = &netdev_ops;
475 SET_ETHTOOL_OPS(dev, &ethtool_ops); 475 dev->ethtool_ops = &ethtool_ops;
476 dev->watchdog_timeo = TX_TIMEOUT; 476 dev->watchdog_timeo = TX_TIMEOUT;
477 477
478 if (mtu) 478 if (mtu)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c14bd3116e45..d49cba129081 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
66 Say Y here if you want to enable hardware offload support for 66 Say Y here if you want to enable hardware offload support for
67 Virtual eXtensible Local Area Network (VXLAN) in the driver. 67 Virtual eXtensible Local Area Network (VXLAN) in the driver.
68 68
69config QLCNIC_HWMON
70 bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
71 depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
72 default y
73 ---help---
74 This configuration parameter can be used to read the
75 board temperature in Converged Ethernet devices
76 supported by qlcnic.
77
78 This data is available via the hwmon sysfs interface.
79
69config QLGE 80config QLGE
70 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 81 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
71 depends on PCI 82 depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f09c35d669b3..5bf05818a12c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1373 1373
1374 netxen_nic_change_mtu(netdev, netdev->mtu); 1374 netxen_nic_change_mtu(netdev, netdev->mtu);
1375 1375
1376 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 1376 netdev->ethtool_ops = &netxen_nic_ethtool_ops;
1377 1377
1378 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1378 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1379 NETIF_F_RXCSUM; 1379 NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2eabd44f8914..b5d6bc1a8b00 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
3838 3838
3839 /* Set driver entry points */ 3839 /* Set driver entry points */
3840 ndev->netdev_ops = &ql3xxx_netdev_ops; 3840 ndev->netdev_ops = &ql3xxx_netdev_ops;
3841 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3841 ndev->ethtool_ops = &ql3xxx_ethtool_ops;
3842 ndev->watchdog_timeo = 5 * HZ; 3842 ndev->watchdog_timeo = 5 * HZ;
3843 3843
3844 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3844 netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f785d01c7d12..be618b9e874f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
39 39
40#define _QLCNIC_LINUX_MAJOR 5 40#define _QLCNIC_LINUX_MAJOR 5
41#define _QLCNIC_LINUX_MINOR 3 41#define _QLCNIC_LINUX_MINOR 3
42#define _QLCNIC_LINUX_SUBVERSION 57 42#define _QLCNIC_LINUX_SUBVERSION 60
43#define QLCNIC_LINUX_VERSIONID "5.3.57" 43#define QLCNIC_LINUX_VERSIONID "5.3.60"
44#define QLCNIC_DRV_IDC_VER 0x01 44#define QLCNIC_DRV_IDC_VER 0x01
45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -441,6 +441,8 @@ struct qlcnic_82xx_dump_template_hdr {
441 u32 rsvd1[0]; 441 u32 rsvd1[0];
442}; 442};
443 443
444#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
445
444struct qlcnic_fw_dump { 446struct qlcnic_fw_dump {
445 u8 clr; /* flag to indicate if dump is cleared */ 447 u8 clr; /* flag to indicate if dump is cleared */
446 bool enable; /* enable/disable dump */ 448 bool enable; /* enable/disable dump */
@@ -537,6 +539,7 @@ struct qlcnic_hardware_context {
537 u8 phys_port_id[ETH_ALEN]; 539 u8 phys_port_id[ETH_ALEN];
538 u8 lb_mode; 540 u8 lb_mode;
539 u16 vxlan_port; 541 u16 vxlan_port;
542 struct device *hwmon_dev;
540}; 543};
541 544
542struct qlcnic_adapter_stats { 545struct qlcnic_adapter_stats {
@@ -1018,6 +1021,8 @@ struct qlcnic_ipaddr {
1018#define QLCNIC_DEL_VXLAN_PORT 0x200000 1021#define QLCNIC_DEL_VXLAN_PORT 0x200000
1019#endif 1022#endif
1020 1023
1024#define QLCNIC_VLAN_FILTERING 0x800000
1025
1021#define QLCNIC_IS_MSI_FAMILY(adapter) \ 1026#define QLCNIC_IS_MSI_FAMILY(adapter) \
1022 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 1027 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
1023#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 1028#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1316,6 +1321,7 @@ struct qlcnic_eswitch {
1316#define QL_STATUS_INVALID_PARAM -1 1321#define QL_STATUS_INVALID_PARAM -1
1317 1322
1318#define MAX_BW 100 /* % of link speed */ 1323#define MAX_BW 100 /* % of link speed */
1324#define MIN_BW 1 /* % of link speed */
1319#define MAX_VLAN_ID 4095 1325#define MAX_VLAN_ID 4095
1320#define MIN_VLAN_ID 2 1326#define MIN_VLAN_ID 2
1321#define DEFAULT_MAC_LEARN 1 1327#define DEFAULT_MAC_LEARN 1
@@ -1692,7 +1698,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
1692int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); 1698int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
1693void qlcnic_set_netdev_features(struct qlcnic_adapter *, 1699void qlcnic_set_netdev_features(struct qlcnic_adapter *,
1694 struct qlcnic_esw_func_cfg *); 1700 struct qlcnic_esw_func_cfg *);
1695void qlcnic_sriov_vf_schedule_multi(struct net_device *); 1701void qlcnic_sriov_vf_set_multi(struct net_device *);
1696int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8); 1702int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
1697int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *, 1703int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
1698 u16 *); 1704 u16 *);
@@ -2338,6 +2344,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
2338 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; 2344 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
2339} 2345}
2340 2346
2347static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
2348{
2349 bool status;
2350
2351 status = (qlcnic_sriov_pf_check(adapter) ||
2352 qlcnic_sriov_vf_check(adapter)) ? true : false;
2353
2354 return status;
2355}
2356
2341static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter) 2357static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
2342{ 2358{
2343 if (qlcnic_84xx_check(adapter)) 2359 if (qlcnic_84xx_check(adapter))
@@ -2345,4 +2361,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
2345 else 2361 else
2346 return QLC_DEFAULT_VNIC_COUNT; 2362 return QLC_DEFAULT_VNIC_COUNT;
2347} 2363}
2364
2365#ifdef CONFIG_QLCNIC_HWMON
2366void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
2367void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
2368#else
2369static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
2370{
2371 return;
2372}
2373static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
2374{
2375 return;
2376}
2377#endif
2348#endif /* __QLCNIC_H_ */ 2378#endif /* __QLCNIC_H_ */
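
The CONFIG_QLCNIC_HWMON plumbing pairs the new Kconfig bool from earlier in this section, whose `!(QLCNIC=y && HWMON=m)` clause keeps a built-in qlcnic from referencing symbols in a modular hwmon core, with the standard header pattern: real prototypes under the #ifdef and empty static inline stubs otherwise, so call sites never carry their own guards. Reduced sketch (the bare `return;` in the stubs above is legal but redundant):

	#ifdef CONFIG_QLCNIC_HWMON
	void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
	void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
	#else
	/* feature off: calls compile away to nothing */
	static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter) { }
	static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter) { }
	#endif
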
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b7cffb46a75d..a4a4ec0b68f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
33#define RSS_HASHTYPE_IP_TCP 0x3 33#define RSS_HASHTYPE_IP_TCP 0x3
34#define QLC_83XX_FW_MBX_CMD 0 34#define QLC_83XX_FW_MBX_CMD 0
35#define QLC_SKIP_INACTIVE_PCI_REGS 7 35#define QLC_SKIP_INACTIVE_PCI_REGS 7
36#define QLC_MAX_LEGACY_FUNC_SUPP 8
36 37
37static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { 38static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
38 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, 39 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
357 if (!ahw->intr_tbl) 358 if (!ahw->intr_tbl)
358 return -ENOMEM; 359 return -ENOMEM;
359 360
360 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 361 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
362 if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
363 dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
364 ahw->pci_func);
365 return -EOPNOTSUPP;
366 }
367
361 qlcnic_83xx_enable_legacy(adapter); 368 qlcnic_83xx_enable_legacy(adapter);
369 }
362 370
363 for (i = 0; i < num_msix; i++) { 371 for (i = 0; i < num_msix; i++) {
364 if (adapter->flags & QLCNIC_MSIX_ENABLED) 372 if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
879 return 0; 887 return 0;
880 } 888 }
881 } 889 }
890
891 dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
892 __func__, type);
882 return -EINVAL; 893 return -EINVAL;
883} 894}
884 895
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
3026 QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); 3037 QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
3027} 3038}
3028 3039
3029int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, 3040int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
3030 u32 *data, u32 count) 3041 u32 *data, u32 count)
3031{ 3042{
3032 int i, j, ret = 0; 3043 int i, j, ret = 0;
3033 u32 temp; 3044 u32 temp;
3034 int err = 0;
3035 3045
3036 /* Check alignment */ 3046 /* Check alignment */
3037 if (addr & 0xF) 3047 if (addr & 0xF)
3038 return -EIO; 3048 return -EIO;
3039 3049
3040 mutex_lock(&adapter->ahw->mem_lock); 3050 mutex_lock(&adapter->ahw->mem_lock);
3041 qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0); 3051 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
3042 3052
3043 for (i = 0; i < count; i++, addr += 16) { 3053 for (i = 0; i < count; i++, addr += 16) {
3044 if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET, 3054 if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
 			return -EIO;
 		}
 
-		qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
-		qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
-					     *data++);
-		qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
-					     *data++);
-		qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
-					     *data++);
-		qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
-					     *data++);
-		qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
-					     QLCNIC_TA_WRITE_ENABLE);
-		qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
-					     QLCNIC_TA_WRITE_START);
+		qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+		qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+		qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+		qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+		qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+		qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+		qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
 
 		for (j = 0; j < MAX_CTL_CHECK; j++) {
-			temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
-			if (err == -EIO) {
-				mutex_unlock(&adapter->ahw->mem_lock);
-				return err;
-			}
+			temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
 
 			if ((temp & TA_CTL_BUSY) == 0)
 				break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 88d809c35633..2bf101a47d02 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -418,7 +418,6 @@ enum qlcnic_83xx_states {
 #define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val)	(val & 0x80000000)
 #define QLC_83XX_GET_LRO_CAPABILITY(val)		(val & 0x20)
 #define QLC_83XX_GET_LSO_CAPABILITY(val)		(val & 0x40)
-#define QLC_83XX_GET_LSO_CAPABILITY(val)		(val & 0x40)
 #define QLC_83XX_GET_HW_LRO_CAPABILITY(val)		(val & 0x400)
 #define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val)	(val & 0x4000)
 #define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val)	(val & 0x20000)
@@ -560,7 +559,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
 void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
 void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
 int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
-void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
 int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
 int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +616,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
 void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
 int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
 int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +657,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
 u32 qlcnic_83xx_get_cap_size(void *, int);
 void qlcnic_83xx_set_sys_info(void *, int, u32);
 void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ba20c721ee97..f33559b72528 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
 		return ret;
 	}
 	/* 16 byte write to MS memory */
-	ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
-					  size / 16);
+	ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+				     size / 16);
 	if (ret) {
 		vfree(p_cache);
 		return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 	p_cache = (u32 *)fw->data;
 	addr = (u64)dest;
 
-	ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
-					  p_cache, size / 16);
+	ret = qlcnic_ms_mem_write128(adapter, addr,
+				     p_cache, size / 16);
 	if (ret) {
 		dev_err(&adapter->pdev->dev, "MS memory write failed\n");
 		release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 			data[i] = fw->data[size + i];
 		for (; i < 16; i++)
 			data[i] = 0;
-		ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
-						  (u32 *)data, 1);
+		ret = qlcnic_ms_mem_write128(adapter, addr,
+					     (u32 *)data, 1);
 		if (ret) {
 			dev_err(&adapter->pdev->dev,
 				"MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
 		max_sds_rings = QLCNIC_MAX_SDS_RINGS;
 		max_tx_rings = QLCNIC_MAX_TX_RINGS;
 	} else {
+		dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+			__func__, ret);
 		return -EIO;
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index c1e11f5715b0..304e247bdf33 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
 	u32 arg1;
 
 	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
-	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+		dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+			__func__);
 		return err;
+	}
 
 	arg1 = id | (enable_mirroring ? BIT_4 : 0);
 	arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
 	u32 arg1, arg2 = 0;
 	u8 pci_func;
 
-	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+		dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+			__func__);
 		return err;
+	}
+
 	pci_func = esw_cfg->pci_func;
 	index = qlcnic_is_valid_nic_func(adapter, pci_func);
 	if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
 		arg1 &= ~(0x0ffff << 16);
 		break;
 	default:
+		dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+			__func__, esw_cfg->op_mode);
 		return err;
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5bacf5210aed..1b7f3dbae289 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -726,6 +726,11 @@ static int qlcnic_set_channels(struct net_device *dev,
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	int err;
 
+	if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+		netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
+		return -EINVAL;
+	}
+
 	if (channel->other_count || channel->combined_count)
 		return -EINVAL;
 
@@ -734,7 +739,7 @@ static int qlcnic_set_channels(struct net_device *dev,
 	if (err)
 		return err;
 
-	if (channel->rx_count) {
+	if (adapter->drv_sds_rings != channel->rx_count) {
 		err = qlcnic_validate_rings(adapter, channel->rx_count,
 					    QLCNIC_RX_QUEUE);
 		if (err) {
@@ -745,7 +750,7 @@ static int qlcnic_set_channels(struct net_device *dev,
 		adapter->drv_rss_rings = channel->rx_count;
 	}
 
-	if (channel->tx_count) {
+	if (adapter->drv_tx_rings != channel->tx_count) {
 		err = qlcnic_validate_rings(adapter, channel->tx_count,
 					    QLCNIC_TX_QUEUE);
 		if (err) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 9f3adf4e70b5..851cb4a80d50 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
 	return data;
 }
 
-void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
 {
+	int ret = 0;
+
 	if (qlcnic_82xx_check(adapter))
 		qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
 	else
-		qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+		ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+	return ret;
 }
 
 static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 void qlcnic_set_multi(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_mac_vlan_list *cur;
-	struct netdev_hw_addr *ha;
-	size_t temp;
 
 	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return;
-	if (qlcnic_sriov_vf_check(adapter)) {
-		if (!netdev_mc_empty(netdev)) {
-			netdev_for_each_mc_addr(ha, netdev) {
-				temp = sizeof(struct qlcnic_mac_vlan_list);
-				cur = kzalloc(temp, GFP_ATOMIC);
-				if (cur == NULL)
-					break;
-				memcpy(cur->mac_addr,
-				       ha->addr, ETH_ALEN);
-				list_add_tail(&cur->list, &adapter->vf_mc_list);
-			}
-		}
-		qlcnic_sriov_vf_schedule_multi(adapter->netdev);
-		return;
-	}
-	__qlcnic_set_multi(netdev, 0);
+
+	if (qlcnic_sriov_vf_check(adapter))
+		qlcnic_sriov_vf_set_multi(netdev);
+	else
+		__qlcnic_set_multi(netdev, 0);
 }
 
 int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 	struct hlist_node *n;
 	struct hlist_head *head;
 	int i;
-	unsigned long time;
+	unsigned long expires;
 	u8 cmd;
 
 	for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 			cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
 						 QLCNIC_MAC_DEL;
-			time = tmp_fil->ftime;
-			if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+			expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+			if (time_before(expires, jiffies)) {
 				qlcnic_sre_macaddr_change(adapter,
 							  tmp_fil->faddr,
 							  tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 
 		hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
 		{
-			time = tmp_fil->ftime;
-			if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+			expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+			if (time_before(expires, jiffies)) {
 				spin_lock_bh(&adapter->rx_mac_learn_lock);
 				adapter->rx_fhash.fnum--;
 				hlist_del(&tmp_fil->fnode);
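The two hunks above replace open-coded "jiffies > deadline" tests with time_before(), which stays correct when the jiffies counter wraps around. A standalone sketch of the pattern (the AGE_SECONDS constant is hypothetical, not from the patch):

    #include <linux/jiffies.h>

    #define AGE_SECONDS	30	/* hypothetical aging interval */

    static bool entry_expired(unsigned long stamp)
    {
    	unsigned long expires = stamp + AGE_SECONDS * HZ;

    	/* time_before() compares via signed arithmetic, so it keeps
    	 * working across a jiffies wraparound; a plain ">" does not.
    	 */
    	return time_before(expires, jiffies);
    }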
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 173b3d12991f..e45bf09af0c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 {
 	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
-	struct net_device *netdev = adapter->netdev;
 	u16 protocol = ntohs(skb->protocol);
 	struct qlcnic_filter *fil, *tmp_fil;
 	struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 	u16 vlan_id = 0;
 	u8 hindex, hval;
 
-	if (!qlcnic_sriov_pf_check(adapter)) {
-		if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
-			return;
-	} else {
+	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+		return;
+
+	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
 		if (protocol == ETH_P_8021Q) {
 			vh = (struct vlan_ethhdr *)skb->data;
 			vlan_id = ntohs(vh->h_vlan_TCI);
 		} else if (vlan_tx_tag_present(skb)) {
 			vlan_id = vlan_tx_tag_get(skb);
 		}
-
-		if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
-		    !vlan_id)
-			return;
-	}
-
-	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
-		adapter->stats.mac_filter_limit_overrun++;
-		netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
-			    adapter->fhash.fmax, adapter->fhash.fnum);
-		return;
 	}
 
 	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 		}
 	}
 
+	if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+		adapter->stats.mac_filter_limit_overrun++;
+		return;
+	}
+
 	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
 	if (!fil)
 		return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
 	if (!skb)
 		return buffer;
 
-	if (adapter->drv_mac_learn &&
-	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+	if (adapter->rx_mac_learn) {
 		t_vid = 0;
 		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
 		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
 	if (!skb)
 		return buffer;
 
-	if (adapter->drv_mac_learn &&
-	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+	if (adapter->rx_mac_learn) {
 		t_vid = 0;
 		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
 		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7e55e88a81bf..4fc186713b66 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 	if (!adapter->fdb_mac_learn)
 		return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
 
-	if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+	    qlcnic_sriov_check(adapter)) {
 		if (is_unicast_ether_addr(addr)) {
 			err = dev_uc_del(netdev, addr);
 			if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 	if (!adapter->fdb_mac_learn)
 		return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
 
-	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+	    !qlcnic_sriov_check(adapter)) {
 		pr_info("%s: FDB e-switch is not enabled\n", __func__);
 		return -EOPNOTSUPP;
 	}
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
 	if (!adapter->fdb_mac_learn)
 		return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
 
-	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+	    qlcnic_sriov_check(adapter))
 		idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
 
 	return idx;
@@ -522,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 #endif
 #ifdef CONFIG_QLCNIC_SRIOV
 	.ndo_set_vf_mac		= qlcnic_sriov_set_vf_mac,
-	.ndo_set_vf_tx_rate	= qlcnic_sriov_set_vf_tx_rate,
+	.ndo_set_vf_rate	= qlcnic_sriov_set_vf_tx_rate,
 	.ndo_get_vf_config	= qlcnic_sriov_get_vf_config,
 	.ndo_set_vf_vlan	= qlcnic_sriov_set_vf_vlan,
 	.ndo_set_vf_spoofchk	= qlcnic_sriov_set_vf_spoofchk,
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
 		adapter->msix_entries[vector].entry = vector;
 
 restore:
-	err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-	if (err > 0) {
+	err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+	if (err == -ENOSPC) {
 		if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
-			return -ENOSPC;
+			return err;
 
 		netdev_info(adapter->netdev,
 			    "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 
 	if (pfn >= ahw->max_vnic_func) {
 		ret = QL_STATUS_INVALID_PARAM;
+		dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+			__func__, pfn, ahw->max_vnic_func);
 		goto err_eswitch;
 	}
 
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return;
 
-	if (qlcnic_sriov_vf_check(adapter))
-		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 	smp_mb();
 	netif_carrier_off(netdev);
 	adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	qlcnic_delete_lb_filters(adapter);
 
 	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+	if (qlcnic_sriov_vf_check(adapter))
+		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 
 	qlcnic_napi_disable(adapter);
 
@@ -2052,6 +2057,7 @@ out:
 
 static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int err = 0;
 
 	adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
 		goto err_out;
 	}
 
+	if (qlcnic_83xx_check(adapter)) {
+		ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+		ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+		ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+		ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+		ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+	} else {
+		ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+		ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+		ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+	}
+
 	/* clear stats */
 	memset(&adapter->stats, 0, sizeof(adapter->stats));
 err_out:
@@ -2069,12 +2087,20 @@ err_out:
 
 static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
 	kfree(adapter->recv_ctx);
 	adapter->recv_ctx = NULL;
 
-	if (adapter->ahw->fw_dump.tmpl_hdr) {
-		vfree(adapter->ahw->fw_dump.tmpl_hdr);
-		adapter->ahw->fw_dump.tmpl_hdr = NULL;
+	if (fw_dump->tmpl_hdr) {
+		vfree(fw_dump->tmpl_hdr);
+		fw_dump->tmpl_hdr = NULL;
+	}
+
+	if (fw_dump->dma_buffer) {
+		dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+				  fw_dump->dma_buffer, fw_dump->phys_addr);
+		fw_dump->dma_buffer = NULL;
 	}
 
 	kfree(adapter->ahw->reset.buff);
@@ -2247,10 +2273,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 
 	qlcnic_change_mtu(netdev, netdev->mtu);
 
-	if (qlcnic_sriov_vf_check(adapter))
-		SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
-	else
-		SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+	netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+		&qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
 
 	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 			     NETIF_F_IPV6_CSUM | NETIF_F_GRO |
@@ -2417,9 +2441,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int err, pci_using_dac = -1;
 	char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
 
-	if (pdev->is_virtfn)
-		return -ENODEV;
-
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
@@ -2552,9 +2573,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		case -ENOMEM:
 			dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
 			goto err_out_free_hw;
+		case -EOPNOTSUPP:
+			dev_err(&pdev->dev, "Adapter initialization failed\n");
+			goto err_out_free_hw;
 		default:
-			dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
-			dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+			dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
 			goto err_out_maintenance_mode;
 		}
 	}
@@ -2628,7 +2651,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	qlcnic_alloc_lb_filters_mem(adapter);
 
 	qlcnic_add_sysfs(adapter);
-
+	qlcnic_register_hwmon_dev(adapter);
 	return 0;
 
 err_out_disable_mbx_intr:
@@ -2665,7 +2688,7 @@ err_out_disable_pdev:
 err_out_maintenance_mode:
 	set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
 	netdev->netdev_ops = &qlcnic_netdev_failed_ops;
-	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+	netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
 	ahw->port_type = QLCNIC_XGBE;
 
 	if (qlcnic_83xx_check(adapter))
@@ -2698,9 +2721,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
 		return;
 
 	netdev = adapter->netdev;
-	qlcnic_sriov_pf_disable(adapter);
 
 	qlcnic_cancel_idc_work(adapter);
+	qlcnic_sriov_pf_disable(adapter);
 	ahw = adapter->ahw;
 
 	unregister_netdev(netdev);
@@ -2735,6 +2758,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
 
 	qlcnic_remove_sysfs(adapter);
 
+	qlcnic_unregister_hwmon_dev(adapter);
+
 	qlcnic_cleanup_pci_map(adapter->ahw);
 
 	qlcnic_release_firmware(adapter);
@@ -2828,6 +2853,8 @@ static int qlcnic_close(struct net_device *netdev)
 	return 0;
 }
 
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
 {
 	void *head;
@@ -2843,7 +2870,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
 	spin_lock_init(&adapter->mac_learn_lock);
 	spin_lock_init(&adapter->rx_mac_learn_lock);
 
-	if (qlcnic_82xx_check(adapter)) {
+	if (qlcnic_sriov_vf_check(adapter)) {
+		filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+		adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+	} else if (qlcnic_82xx_check(adapter)) {
 		filter_size = QLCNIC_LB_MAX_FILTERS;
 		adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
 	} else {
@@ -3973,16 +4003,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 		strcpy(buf, "Tx");
 	}
 
-	if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
-		netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
-		return -EINVAL;
-	}
-
-	if (adapter->flags & QLCNIC_MSI_ENABLED) {
-		netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
-		return -EINVAL;
-	}
-
 	if (!is_power_of_2(ring_cnt)) {
 		netdev_err(netdev, "%s rings value should be a power of 2\n",
 			   buf);
@@ -4122,7 +4142,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
 
 	rcu_read_lock();
 	for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
-		dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
+		dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
 		if (!dev)
 			continue;
 		qlcnic_config_indev_addr(adapter, dev, event);
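Among the qlcnic_main.c hunks above, MSI-X setup moves from pci_enable_msix(), whose positive return value meant "only this many vectors available", to pci_enable_msix_exact(), which either enables exactly the requested count and returns 0 or fails with a negative errno (-ENOSPC when fewer vectors are free). A hedged sketch of a retry loop built on that contract (the function and parameter names are hypothetical):

    #include <linux/pci.h>

    static int enable_msix_exact_or_shrink(struct pci_dev *pdev,
    				       struct msix_entry *entries,
    				       int want, int min_vecs)
    {
    	int err;

    	for (; want >= min_vecs; want--) {
    		err = pci_enable_msix_exact(pdev, entries, want);
    		if (!err)
    			return want;	/* exactly 'want' vectors enabled */
    		if (err != -ENOSPC)
    			return err;	/* hard failure, give up */
    		/* -ENOSPC: retry with one vector fewer */
    	}
    	return -ENOSPC;
    }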
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 37b979b1266b..e46fc39d425d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 
 	hdr->drv_cap_mask = hdr->cap_mask;
 	fw_dump->cap_mask = hdr->cap_mask;
+
+	fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
 }
 
 inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
 	hdr->saved_state[index] = value;
 }
 
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
 void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 {
 	struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 
 	hdr->drv_cap_mask = hdr->cap_mask;
 	fw_dump->cap_mask = hdr->cap_mask;
+
+	fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+			       QLCNIC_TEMPLATE_VERSION;
 }
 
 inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -653,34 +660,31 @@ out:
 #define QLC_DMA_CMD_BUFF_ADDR_HI	4
 #define QLC_DMA_CMD_STATUS_CTRL		8
 
-#define QLC_PEX_DMA_READ_SIZE		(PAGE_SIZE * 16)
-
 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
 				struct __mem *mem)
 {
-	struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
 	struct device *dev = &adapter->pdev->dev;
 	u32 dma_no, dma_base_addr, temp_addr;
 	int i, ret, dma_sts;
+	void *tmpl_hdr;
 
 	tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
-	dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+	dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+					QLC_83XX_DMA_ENGINE_INDEX);
 	dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
 
 	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
-	ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
-					   mem->desc_card_addr);
+	ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
 	if (ret)
 		return ret;
 
 	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
-	ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+	ret = qlcnic_ind_wr(adapter, temp_addr, 0);
 	if (ret)
 		return ret;
 
 	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
-	ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
-					   mem->start_dma_cmd);
+	ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
 	if (ret)
 		return ret;
 
@@ -710,15 +714,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 	u32 temp, dma_base_addr, size = 0, read_size = 0;
 	struct qlcnic_pex_dma_descriptor *dma_descr;
-	struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
 	struct device *dev = &adapter->pdev->dev;
 	dma_addr_t dma_phys_addr;
 	void *dma_buffer;
+	void *tmpl_hdr;
 
 	tmpl_hdr = fw_dump->tmpl_hdr;
 
 	/* Check if DMA engine is available */
-	temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+	temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+				      QLC_83XX_DMA_ENGINE_INDEX);
 	dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
 	temp = qlcnic_ind_rd(adapter,
 			     dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +769,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
 
 	/* Write DMA descriptor to MS memory*/
 	temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
-	*ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
-					   (u32 *)dma_descr, temp);
+	*ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+				      (u32 *)dma_descr, temp);
 	if (*ret) {
 		dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
 			 mem->desc_card_addr);
@@ -1141,8 +1146,6 @@ free_mem:
 	return err;
 }
 
-#define QLCNIC_TEMPLATE_VERSION (0x20001)
-
 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw;
@@ -1150,6 +1153,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 	u32 version, csum, *tmp_buf;
 	u8 use_flash_temp = 0;
 	u32 temp_size = 0;
+	void *temp_buffer;
 	int err;
 
 	ahw = adapter->ahw;
@@ -1199,16 +1203,23 @@ flash_temp:
 
 	qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
 
+	if (fw_dump->use_pex_dma) {
+		fw_dump->dma_buffer = NULL;
+		temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+						 QLC_PEX_DMA_READ_SIZE,
+						 &fw_dump->phys_addr,
+						 GFP_KERNEL);
+		if (!temp_buffer)
+			fw_dump->use_pex_dma = false;
+		else
+			fw_dump->dma_buffer = temp_buffer;
+	}
+
 	dev_info(&adapter->pdev->dev,
 		 "Default minidump capture mask 0x%x\n",
 		 fw_dump->cap_mask);
 
-	if (qlcnic_83xx_check(adapter) &&
-	    (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
-		fw_dump->use_pex_dma = true;
-	else
-		fw_dump->use_pex_dma = false;
-
 	qlcnic_enable_fw_dump_state(adapter);
 
 	return 0;
@@ -1224,7 +1235,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 	struct device *dev = &adapter->pdev->dev;
 	struct qlcnic_hardware_context *ahw;
 	struct qlcnic_dump_entry *entry;
-	void *temp_buffer, *tmpl_hdr;
+	void *tmpl_hdr;
 	u32 ocm_window;
 	__le32 *buffer;
 	char mesg[64];
@@ -1268,16 +1279,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
 	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
 
-	if (fw_dump->use_pex_dma) {
-		temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
-						 &fw_dump->phys_addr,
-						 GFP_KERNEL);
-		if (!temp_buffer)
-			fw_dump->use_pex_dma = false;
-		else
-			fw_dump->dma_buffer = temp_buffer;
-	}
-
 	if (qlcnic_82xx_check(adapter)) {
 		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
 		fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1335,10 +1336,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 	/* Send a udev event to notify availability of FW dump */
 	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
 
-	if (fw_dump->use_pex_dma)
-		dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
-				  fw_dump->dma_buffer, fw_dump->phys_addr);
-
 	return 0;
 }
 
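The minidump hunks above change the PEX DMA buffer's lifetime: instead of calling dma_alloc_coherent()/dma_free_coherent() around every firmware dump, the buffer is allocated once when the dump template is fetched and released in qlcnic_free_adapter_resources(). A minimal sketch of the allocate-once/free-at-teardown pattern (the struct and function names are hypothetical):

    #include <linux/dma-mapping.h>

    struct dump_buf {
    	void		*cpu_addr;
    	dma_addr_t	dma_addr;
    };

    static int dump_buf_init(struct device *dev, struct dump_buf *db,
    			 size_t size)
    {
    	db->cpu_addr = dma_alloc_coherent(dev, size, &db->dma_addr,
    					  GFP_KERNEL);
    	/* on failure the caller can simply fall back to PIO reads */
    	return db->cpu_addr ? 0 : -ENOMEM;
    }

    static void dump_buf_exit(struct device *dev, struct dump_buf *db,
    			  size_t size)
    {
    	if (db->cpu_addr) {
    		dma_free_coherent(dev, size, db->cpu_addr, db->dma_addr);
    		db->cpu_addr = NULL;
    	}
    }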
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 396bd1fd1d27..4677b2edccca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
 	QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
 };
 
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
 #define QLC_BC_CMD 1
 
 struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
 	struct qlcnic_trans_list	rcv_pend;
 	struct qlcnic_adapter		*adapter;
 	struct qlcnic_vport		*vp;
-	struct mutex			vlan_list_lock;	/* Lock for VLAN list */
+	spinlock_t			vlan_list_lock;	/* Lock for VLAN list */
 };
 
 struct qlcnic_async_work_list {
 	struct list_head	list;
 	struct work_struct	work;
 	void			*ptr;
+	struct qlcnic_cmd_args	*cmd;
 };
 
 struct qlcnic_back_channel {
@@ -231,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
 void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
 int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
 int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
 int qlcnic_sriov_get_vf_config(struct net_device *, int,
 			       struct ifla_vf_info *);
 int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
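The qlcnic_sriov.h hunk above turns the per-VF VLAN list lock from a mutex into a spinlock, matching the qlcnic_sriov_common.c hunks further down that take it with spin_lock()/spin_lock_bh(): the list is now touched from contexts that must not sleep. A standalone sketch of the _bh-locked lookup pattern (the structure and names are hypothetical):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define NUM_VLANS 8

    struct vlan_table {
    	spinlock_t	lock;		/* protects ids[] */
    	u16		ids[NUM_VLANS];
    };

    static bool vlan_table_contains(struct vlan_table *vt, u16 vid)
    {
    	bool found = false;
    	int i;

    	spin_lock_bh(&vt->lock);	/* also excludes softirq users */
    	for (i = 0; i < NUM_VLANS; i++) {
    		if (vt->ids[i] == vid) {
    			found = true;
    			break;
    		}
    	}
    	spin_unlock_bh(&vt->lock);
    	return found;
    }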
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6afe9c1f5ab9..1659c804f1d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
39static void qlcnic_sriov_process_bc_cmd(struct work_struct *); 39static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
40static int qlcnic_sriov_vf_shutdown(struct pci_dev *); 40static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
41static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *); 41static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
42static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
43 struct qlcnic_cmd_args *);
42 44
43static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { 45static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
44 .read_crb = qlcnic_83xx_read_crb, 46 .read_crb = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
181 vf->adapter = adapter; 183 vf->adapter = adapter;
182 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); 184 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
183 mutex_init(&vf->send_cmd_lock); 185 mutex_init(&vf->send_cmd_lock);
184 mutex_init(&vf->vlan_list_lock); 186 spin_lock_init(&vf->vlan_list_lock);
185 INIT_LIST_HEAD(&vf->rcv_act.wait_list); 187 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
186 INIT_LIST_HEAD(&vf->rcv_pend.wait_list); 188 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
187 spin_lock_init(&vf->rcv_act.lock); 189 spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 			goto qlcnic_destroy_async_wq;
 		}
 		sriov->vf_info[i].vp = vp;
+		vp->vlan_mode = QLC_GUEST_VLAN_MODE;
 		vp->max_tx_bw = MAX_BW;
-		vp->spoofchk = true;
+		vp->min_tx_bw = MIN_BW;
+		vp->spoofchk = false;
 		random_ether_addr(vp->mac);
 		dev_info(&adapter->pdev->dev,
 			 "MAC Address %pM is configured for VF %d\n",
@@ -454,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 	struct qlcnic_cmd_args cmd;
 	int ret = 0;
 
+	memset(&cmd, 0, sizeof(cmd));
 	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
 	if (ret)
 		return ret;
@@ -515,6 +520,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 {
 	int err;
 
+	adapter->flags |= QLCNIC_VLAN_FILTERING;
+	adapter->ahw->total_nic_func = 1;
 	INIT_LIST_HEAD(&adapter->vf_mc_list);
 	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
 		dev_warn(&adapter->pdev->dev,
@@ -770,6 +777,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
 	cmd->req.arg = (u32 *)trans->req_pay;
 	cmd->rsp.arg = (u32 *)trans->rsp_pay;
 	cmd_op = cmd->req.arg[0] & 0xff;
+	cmd->cmd_op = cmd_op;
 	remainder = (trans->rsp_pay_size) % (bc_pay_sz);
 	num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
 	if (remainder)
@@ -1356,7 +1364,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
 	return -EIO;
 }
 
-static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
-				  struct qlcnic_cmd_args *cmd)
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+				    struct qlcnic_cmd_args *cmd)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1408,12 +1416,17 @@ retry:
 	    (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
 		rsp = QLCNIC_RCODE_SUCCESS;
 	} else {
-		rsp = mbx_err_code;
-		if (!rsp)
-			rsp = 1;
-		dev_err(dev,
-			"MBX command 0x%x failed with err:0x%x for VF %d\n",
-			opcode, mbx_err_code, func);
+		if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+			rsp = QLCNIC_RCODE_SUCCESS;
+		} else {
+			rsp = mbx_err_code;
+			if (!rsp)
+				rsp = 1;
+
+			dev_err(dev,
+				"MBX command 0x%x failed with err:0x%x for VF %d\n",
+				opcode, mbx_err_code, func);
+		}
 	}
 
 err_out:
@@ -1435,12 +1448,23 @@ free_cmd:
 	return rsp;
 }
 
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+				  struct qlcnic_cmd_args *cmd)
+{
+	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+		return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+	else
+		return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
 {
 	struct qlcnic_cmd_args cmd;
 	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
 	int ret;
 
+	memset(&cmd, 0, sizeof(cmd));
 	if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
 		return -ENOMEM;
 
@@ -1465,58 +1489,28 @@ out:
 	return ret;
 }
 
-static void qlcnic_vf_add_mc_list(struct net_device *netdev)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-	struct qlcnic_mac_vlan_list *cur;
-	struct list_head *head, tmp_list;
 	struct qlcnic_vf_info *vf;
 	u16 vlan_id;
 	int i;
 
-	static const u8 bcast_addr[ETH_ALEN] = {
-		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-	};
-
 	vf = &adapter->ahw->sriov->vf_info[0];
-	INIT_LIST_HEAD(&tmp_list);
-	head = &adapter->vf_mc_list;
-	netif_addr_lock_bh(netdev);
 
-	while (!list_empty(head)) {
-		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
-		list_move(&cur->list, &tmp_list);
-	}
-
-	netif_addr_unlock_bh(netdev);
-
-	while (!list_empty(&tmp_list)) {
-		cur = list_entry((&tmp_list)->next,
-				 struct qlcnic_mac_vlan_list, list);
-		if (!qlcnic_sriov_check_any_vlan(vf)) {
-			qlcnic_nic_add_mac(adapter, bcast_addr, 0);
-			qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
-		} else {
-			mutex_lock(&vf->vlan_list_lock);
-			for (i = 0; i < sriov->num_allowed_vlans; i++) {
-				vlan_id = vf->sriov_vlans[i];
-				if (vlan_id) {
-					qlcnic_nic_add_mac(adapter, bcast_addr,
-							   vlan_id);
-					qlcnic_nic_add_mac(adapter,
-							   cur->mac_addr,
-							   vlan_id);
-				}
-			}
-			mutex_unlock(&vf->vlan_list_lock);
-			if (qlcnic_84xx_check(adapter)) {
-				qlcnic_nic_add_mac(adapter, bcast_addr, 0);
-				qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
-			}
+	if (!qlcnic_sriov_check_any_vlan(vf)) {
+		qlcnic_nic_add_mac(adapter, mac, 0);
+	} else {
+		spin_lock(&vf->vlan_list_lock);
+		for (i = 0; i < sriov->num_allowed_vlans; i++) {
+			vlan_id = vf->sriov_vlans[i];
+			if (vlan_id)
+				qlcnic_nic_add_mac(adapter, mac, vlan_id);
 		}
-		list_del(&cur->list);
-		kfree(cur);
+		spin_unlock(&vf->vlan_list_lock);
+		if (qlcnic_84xx_check(adapter))
+			qlcnic_nic_add_mac(adapter, mac, 0);
 	}
 }
@@ -1525,6 +1519,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
 	struct list_head *head = &bc->async_list;
 	struct qlcnic_async_work_list *entry;
 
+	flush_workqueue(bc->bc_async_wq);
 	while (!list_empty(head)) {
 		entry = list_entry(head->next, struct qlcnic_async_work_list,
 				   list);
@@ -1534,10 +1529,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
 	}
 }
 
-static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	static const u8 bcast_addr[ETH_ALEN] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+	struct netdev_hw_addr *ha;
 	u32 mode = VPORT_MISS_MODE_DROP;
 
 	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1549,23 +1548,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 	} else if ((netdev->flags & IFF_ALLMULTI) ||
 		   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
 		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+	} else {
+		qlcnic_vf_add_mc_list(netdev, bcast_addr);
+		if (!netdev_mc_empty(netdev)) {
+			netdev_for_each_mc_addr(ha, netdev)
+				qlcnic_vf_add_mc_list(netdev, ha->addr);
+		}
 	}
 
-	if (qlcnic_sriov_vf_check(adapter))
-		qlcnic_vf_add_mc_list(netdev);
+	/* configure unicast MAC address, if there is not sufficient space
+	 * to store all the unicast addresses then enable promiscuous mode
+	 */
+	if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+		mode = VPORT_MISS_MODE_ACCEPT_ALL;
+	} else if (!netdev_uc_empty(netdev)) {
+		netdev_for_each_uc_addr(ha, netdev)
+			qlcnic_vf_add_mc_list(netdev, ha->addr);
+	}
+
+	if (adapter->pdev->is_virtfn) {
+		if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+		    !adapter->fdb_mac_learn) {
+			qlcnic_alloc_lb_filters_mem(adapter);
+			adapter->drv_mac_learn = 1;
+			adapter->rx_mac_learn = true;
+		} else {
+			adapter->drv_mac_learn = 0;
+			adapter->rx_mac_learn = false;
+		}
+	}
 
 	qlcnic_nic_set_promisc(adapter, mode);
 }
 
-static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
 {
 	struct qlcnic_async_work_list *entry;
-	struct net_device *netdev;
+	struct qlcnic_adapter *adapter;
+	struct qlcnic_cmd_args *cmd;
 
 	entry = container_of(work, struct qlcnic_async_work_list, work);
-	netdev = (struct net_device *)entry->ptr;
-
-	qlcnic_sriov_vf_set_multi(netdev);
+	adapter = entry->ptr;
+	cmd = entry->cmd;
+	__qlcnic_sriov_issue_cmd(adapter, cmd);
 	return;
 }
 
@@ -1595,8 +1620,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
 	return entry;
 }
 
-static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
-						work_func_t func, void *data)
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+					    work_func_t func, void *data,
+					    struct qlcnic_cmd_args *cmd)
 {
 	struct qlcnic_async_work_list *entry = NULL;
 
@@ -1605,21 +1631,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1605 return; 1631 return;
1606 1632
1607 entry->ptr = data; 1633 entry->ptr = data;
1634 entry->cmd = cmd;
1608 INIT_WORK(&entry->work, func); 1635 INIT_WORK(&entry->work, func);
1609 queue_work(bc->bc_async_wq, &entry->work); 1636 queue_work(bc->bc_async_wq, &entry->work);
1610} 1637}
1611 1638
1612void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev) 1639static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1640 struct qlcnic_cmd_args *cmd)
1613{ 1641{
1614 1642
1615 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1616 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; 1643 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1617 1644
1618 if (adapter->need_fw_reset) 1645 if (adapter->need_fw_reset)
1619 return; 1646 return -EIO;
1620 1647
1621 qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi, 1648 qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
1622 netdev); 1649 adapter, cmd);
1650 return 0;
1623} 1651}
1624 1652
1625static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) 1653static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
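The rename above generalizes the old set_multi-only async path into a generic "issue any mailbox command from a workqueue" mechanism, so callers in atomic context can defer the sleeping mailbox transaction. A sketch of the pattern under hypothetical names (struct my_adapter, struct my_cmd_args and issue_cmd_blocking() are stand-ins; the real driver recycles entries on a list rather than kfree()ing them):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct async_cmd_work {
	struct work_struct work;
	struct my_adapter *adapter;
	struct my_cmd_args *cmd;
};

static void async_cmd_handler(struct work_struct *work)
{
	struct async_cmd_work *entry =
		container_of(work, struct async_cmd_work, work);

	issue_cmd_blocking(entry->adapter, entry->cmd);	/* may sleep here */
	kfree(entry);
}

static int schedule_async_cmd(struct workqueue_struct *wq,
			      struct my_adapter *adapter,
			      struct my_cmd_args *cmd)
{
	struct async_cmd_work *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

	if (!entry)
		return -ENOMEM;

	entry->adapter = adapter;
	entry->cmd = cmd;
	INIT_WORK(&entry->work, async_cmd_handler);
	queue_work(wq, &entry->work);
	return 0;
}
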
@@ -1843,6 +1871,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1843 return 0; 1871 return 0;
1844} 1872}
1845 1873
1874static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
1875{
1876 if (adapter->fhash.fnum)
1877 qlcnic_prune_lb_filters(adapter);
1878}
1879
1846static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work) 1880static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1847{ 1881{
1848 struct qlcnic_adapter *adapter; 1882 struct qlcnic_adapter *adapter;
@@ -1874,6 +1908,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1874 } 1908 }
1875 1909
1876 idc->prev_state = idc->curr_state; 1910 idc->prev_state = idc->curr_state;
1911 qlcnic_sriov_vf_periodic_tasks(adapter);
1912
1877 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status)) 1913 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1878 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 1914 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1879 idc->delay); 1915 idc->delay);
@@ -1897,7 +1933,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1897 if (!vf->sriov_vlans) 1933 if (!vf->sriov_vlans)
1898 return err; 1934 return err;
1899 1935
1900 mutex_lock(&vf->vlan_list_lock); 1936 spin_lock_bh(&vf->vlan_list_lock);
1901 1937
1902 for (i = 0; i < sriov->num_allowed_vlans; i++) { 1938 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1903 if (vf->sriov_vlans[i] == vlan_id) { 1939 if (vf->sriov_vlans[i] == vlan_id) {
@@ -1906,7 +1942,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1906 } 1942 }
1907 } 1943 }
1908 1944
1909 mutex_unlock(&vf->vlan_list_lock); 1945 spin_unlock_bh(&vf->vlan_list_lock);
1910 return err; 1946 return err;
1911} 1947}
1912 1948
@@ -1915,12 +1951,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1915{ 1951{
1916 int err = 0; 1952 int err = 0;
1917 1953
1918 mutex_lock(&vf->vlan_list_lock); 1954 spin_lock_bh(&vf->vlan_list_lock);
1919 1955
1920 if (vf->num_vlan >= sriov->num_allowed_vlans) 1956 if (vf->num_vlan >= sriov->num_allowed_vlans)
1921 err = -EINVAL; 1957 err = -EINVAL;
1922 1958
1923 mutex_unlock(&vf->vlan_list_lock); 1959 spin_unlock_bh(&vf->vlan_list_lock);
1924 return err; 1960 return err;
1925} 1961}
1926 1962
@@ -1973,7 +2009,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1973 if (!vf->sriov_vlans) 2009 if (!vf->sriov_vlans)
1974 return; 2010 return;
1975 2011
1976 mutex_lock(&vf->vlan_list_lock); 2012 spin_lock_bh(&vf->vlan_list_lock);
1977 2013
1978 switch (opcode) { 2014 switch (opcode) {
1979 case QLC_VLAN_ADD: 2015 case QLC_VLAN_ADD:
@@ -1986,7 +2022,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1986 netdev_err(adapter->netdev, "Invalid VLAN operation\n"); 2022 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
1987 } 2023 }
1988 2024
1989 mutex_unlock(&vf->vlan_list_lock); 2025 spin_unlock_bh(&vf->vlan_list_lock);
1990 return; 2026 return;
1991} 2027}
1992 2028
@@ -1994,10 +2030,12 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1994 u16 vid, u8 enable) 2030 u16 vid, u8 enable)
1995{ 2031{
1996 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 2032 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2033 struct net_device *netdev = adapter->netdev;
1997 struct qlcnic_vf_info *vf; 2034 struct qlcnic_vf_info *vf;
1998 struct qlcnic_cmd_args cmd; 2035 struct qlcnic_cmd_args cmd;
1999 int ret; 2036 int ret;
2000 2037
2038 memset(&cmd, 0, sizeof(cmd));
2001 if (vid == 0) 2039 if (vid == 0)
2002 return 0; 2040 return 0;
2003 2041
@@ -2019,14 +2057,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
2019 dev_err(&adapter->pdev->dev, 2057 dev_err(&adapter->pdev->dev,
2020 "Failed to configure guest VLAN, err=%d\n", ret); 2058 "Failed to configure guest VLAN, err=%d\n", ret);
2021 } else { 2059 } else {
2060 netif_addr_lock_bh(netdev);
2022 qlcnic_free_mac_list(adapter); 2061 qlcnic_free_mac_list(adapter);
2062 netif_addr_unlock_bh(netdev);
2023 2063
2024 if (enable) 2064 if (enable)
2025 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD); 2065 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
2026 else 2066 else
2027 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE); 2067 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
2028 2068
2029 qlcnic_set_multi(adapter->netdev); 2069 netif_addr_lock_bh(netdev);
2070 qlcnic_set_multi(netdev);
2071 netif_addr_unlock_bh(netdev);
2030 } 2072 }
2031 2073
2032 qlcnic_free_mbx_args(&cmd); 2074 qlcnic_free_mbx_args(&cmd);
@@ -2157,11 +2199,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2157{ 2199{
2158 bool err = false; 2200 bool err = false;
2159 2201
2160 mutex_lock(&vf->vlan_list_lock); 2202 spin_lock_bh(&vf->vlan_list_lock);
2161 2203
2162 if (vf->num_vlan) 2204 if (vf->num_vlan)
2163 err = true; 2205 err = true;
2164 2206
2165 mutex_unlock(&vf->vlan_list_lock); 2207 spin_unlock_bh(&vf->vlan_list_lock);
2166 return err; 2208 return err;
2167} 2209}
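The mutex-to-spin_lock_bh conversion running through this file is forced by the same async rework: once the per-VF VLAN list can be touched from non-sleeping contexts (the deferred-command path and the new periodic filter pruning), a mutex is no longer legal there. A self-contained sketch of the resulting locking rule, assuming a fixed-size list:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(vlan_list_lock);
static u16 vlan_list[16];	/* sized by num_allowed_vlans in the driver */
static int num_vlan;

static bool vlan_present(u16 vid)
{
	bool found = false;
	int i;

	/* legal from process *and* BH context, unlike mutex_lock() */
	spin_lock_bh(&vlan_list_lock);
	for (i = 0; i < num_vlan; i++) {
		if (vlan_list[i] == vid) {
			found = true;
			break;
		}
	}
	spin_unlock_bh(&vlan_list_lock);
	return found;
}
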
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 280137991544..a29538b86edf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -16,6 +16,7 @@
16#define QLC_VF_FLOOD_BIT BIT_16 16#define QLC_VF_FLOOD_BIT BIT_16
17#define QLC_FLOOD_MODE 0x5 17#define QLC_FLOOD_MODE 0x5
18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19 18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
19#define QLC_INTR_COAL_TYPE_MASK 0x7
19 20
20static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); 21static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
21 22
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
83 info->max_tx_ques = res->num_tx_queues / max; 84 info->max_tx_ques = res->num_tx_queues / max;
84 85
85 if (qlcnic_83xx_pf_check(adapter)) 86 if (qlcnic_83xx_pf_check(adapter))
86 num_macs = 1; 87 num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
87 88
88 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; 89 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
89 90
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
337 338
338 cmd.req.arg[1] = 0x4; 339 cmd.req.arg[1] = 0x4;
339 if (enable) { 340 if (enable) {
341 adapter->flags |= QLCNIC_VLAN_FILTERING;
340 cmd.req.arg[1] |= BIT_16; 342 cmd.req.arg[1] |= BIT_16;
341 if (qlcnic_84xx_check(adapter)) 343 if (qlcnic_84xx_check(adapter))
342 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0; 344 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
345 } else {
346 adapter->flags &= ~QLCNIC_VLAN_FILTERING;
343 } 347 }
344 348
345 err = qlcnic_issue_cmd(adapter, &cmd); 349 err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
471 return -EPERM; 475 return -EPERM;
472 } 476 }
473 477
478 qlcnic_sriov_pf_disable(adapter);
479
474 rtnl_lock(); 480 rtnl_lock();
475 if (netif_running(netdev)) 481 if (netif_running(netdev))
476 __qlcnic_down(adapter, netdev); 482 __qlcnic_down(adapter, netdev);
477 483
478 qlcnic_sriov_pf_disable(adapter);
479
480 qlcnic_sriov_free_vlans(adapter); 484 qlcnic_sriov_free_vlans(adapter);
481 485
482 qlcnic_sriov_pf_cleanup(adapter); 486 qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
595 599
596 qlcnic_sriov_alloc_vlans(adapter); 600 qlcnic_sriov_alloc_vlans(adapter);
597 601
598 err = qlcnic_sriov_pf_enable(adapter, num_vfs);
599 return err; 602 return err;
600 603
601del_flr_queue: 604del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
626 __qlcnic_down(adapter, netdev); 629 __qlcnic_down(adapter, netdev);
627 630
628 err = __qlcnic_pci_sriov_enable(adapter, num_vfs); 631 err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
629 if (err) { 632 if (err)
630 netdev_info(netdev, "Failed to enable SR-IOV on port %d\n", 633 goto error;
631 adapter->portnum);
632 634
633 err = -EIO; 635 if (netif_running(netdev))
634 if (qlcnic_83xx_configure_opmode(adapter)) 636 __qlcnic_up(adapter, netdev);
635 goto error; 637
636 } else { 638 rtnl_unlock();
639 err = qlcnic_sriov_pf_enable(adapter, num_vfs);
640 if (!err) {
637 netdev_info(netdev, 641 netdev_info(netdev,
638 "SR-IOV is enabled successfully on port %d\n", 642 "SR-IOV is enabled successfully on port %d\n",
639 adapter->portnum); 643 adapter->portnum);
640 /* Return number of vfs enabled */ 644 /* Return number of vfs enabled */
641 err = num_vfs; 645 return num_vfs;
642 } 646 }
647
648 rtnl_lock();
643 if (netif_running(netdev)) 649 if (netif_running(netdev))
644 __qlcnic_up(adapter, netdev); 650 __qlcnic_down(adapter, netdev);
645 651
646error: 652error:
653 if (!qlcnic_83xx_configure_opmode(adapter)) {
654 if (netif_running(netdev))
655 __qlcnic_up(adapter, netdev);
656 }
657
647 rtnl_unlock(); 658 rtnl_unlock();
659 netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
660 adapter->portnum);
661
648 return err; 662 return err;
649} 663}
650 664
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
773 struct qlcnic_vf_info *vf, 787 struct qlcnic_vf_info *vf,
774 u16 vlan, u8 op) 788 u16 vlan, u8 op)
775{ 789{
776 struct qlcnic_cmd_args cmd; 790 struct qlcnic_cmd_args *cmd;
777 struct qlcnic_macvlan_mbx mv; 791 struct qlcnic_macvlan_mbx mv;
778 struct qlcnic_vport *vp; 792 struct qlcnic_vport *vp;
779 u8 *addr; 793 u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
783 797
784 vp = vf->vp; 798 vp = vf->vp;
785 799
786 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN)) 800 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
801 if (!cmd)
787 return -ENOMEM; 802 return -ENOMEM;
788 803
804 err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
805 if (err)
806 goto free_cmd;
807
808 cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
789 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); 809 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
790 if (vpid < 0) { 810 if (vpid < 0) {
791 err = -EINVAL; 811 err = -EINVAL;
792 goto out; 812 goto free_args;
793 } 813 }
794 814
795 if (vlan) 815 if (vlan)
796 op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 816 op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
797 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL); 817 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
798 818
799 cmd.req.arg[1] = op | (1 << 8) | (3 << 6); 819 cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
800 cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31; 820 cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
801 821
802 addr = vp->mac; 822 addr = vp->mac;
803 mv.vlan = vlan; 823 mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
807 mv.mac_addr3 = addr[3]; 827 mv.mac_addr3 = addr[3];
808 mv.mac_addr4 = addr[4]; 828 mv.mac_addr4 = addr[4];
809 mv.mac_addr5 = addr[5]; 829 mv.mac_addr5 = addr[5];
810 buf = &cmd.req.arg[2]; 830 buf = &cmd->req.arg[2];
811 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); 831 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
812 832
813 err = qlcnic_issue_cmd(adapter, &cmd); 833 err = qlcnic_issue_cmd(adapter, cmd);
814 834
815 if (err) 835 if (!err)
816 dev_err(&adapter->pdev->dev, 836 return err;
817 "MAC-VLAN %s to CAM failed, err=%d.\n",
818 ((op == 1) ? "add " : "delete "), err);
819 837
820out: 838free_args:
821 qlcnic_free_mbx_args(&cmd); 839 qlcnic_free_mbx_args(cmd);
840free_cmd:
841 kfree(cmd);
822 return err; 842 return err;
823} 843}
824 844
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
840 860
841 sriov = adapter->ahw->sriov; 861 sriov = adapter->ahw->sriov;
842 862
843 mutex_lock(&vf->vlan_list_lock); 863 spin_lock_bh(&vf->vlan_list_lock);
844 if (vf->num_vlan) { 864 if (vf->num_vlan) {
845 for (i = 0; i < sriov->num_allowed_vlans; i++) { 865 for (i = 0; i < sriov->num_allowed_vlans; i++) {
846 vlan = vf->sriov_vlans[i]; 866 vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
849 opcode); 869 opcode);
850 } 870 }
851 } 871 }
852 mutex_unlock(&vf->vlan_list_lock); 872 spin_unlock_bh(&vf->vlan_list_lock);
853 873
854 if (vf->vp->vlan_mode != QLC_PVID_MODE) { 874 if (vf->vp->vlan_mode != QLC_PVID_MODE) {
855 if (qlcnic_83xx_pf_check(adapter) && 875 if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
1178{ 1198{
1179 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; 1199 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
1180 u16 ctx_id, pkts, time; 1200 u16 ctx_id, pkts, time;
1201 int err = -EINVAL;
1202 u8 type;
1181 1203
1204 type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
1182 ctx_id = cmd->req.arg[1] >> 16; 1205 ctx_id = cmd->req.arg[1] >> 16;
1183 pkts = cmd->req.arg[2] & 0xffff; 1206 pkts = cmd->req.arg[2] & 0xffff;
1184 time = cmd->req.arg[2] >> 16; 1207 time = cmd->req.arg[2] >> 16;
1185 1208
1186 if (ctx_id != vf->rx_ctx_id) 1209 switch (type) {
1187 return -EINVAL; 1210 case QLCNIC_INTR_COAL_TYPE_RX:
1188 if (pkts > coal->rx_packets) 1211 if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
1189 return -EINVAL; 1212 time < coal->rx_time_us)
1190 if (time < coal->rx_time_us) 1213 goto err_label;
1191 return -EINVAL; 1214 break;
1215 case QLCNIC_INTR_COAL_TYPE_TX:
1216 if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
1217 time < coal->tx_time_us)
1218 goto err_label;
1219 break;
1220 default:
1221 netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
1222 type);
1223 return err;
1224 }
1192 1225
1193 return 0; 1226 return 0;
1227
1228err_label:
1229 netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
1230 vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
1231 vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
1232 netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
1233 ctx_id, pkts, time, type);
1234
1235 return err;
1194} 1236}
1195 1237
1196static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran, 1238static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
1214 struct qlcnic_vf_info *vf, 1256 struct qlcnic_vf_info *vf,
1215 struct qlcnic_cmd_args *cmd) 1257 struct qlcnic_cmd_args *cmd)
1216{ 1258{
1217 struct qlcnic_macvlan_mbx *macvlan;
1218 struct qlcnic_vport *vp = vf->vp; 1259 struct qlcnic_vport *vp = vf->vp;
1219 u8 op, new_op; 1260 u8 op, new_op;
1220 1261
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
1224 cmd->req.arg[1] |= (vf->vp->handle << 16); 1265 cmd->req.arg[1] |= (vf->vp->handle << 16);
1225 cmd->req.arg[1] |= BIT_31; 1266 cmd->req.arg[1] |= BIT_31;
1226 1267
1227 macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
1228 if (!(macvlan->mac_addr0 & BIT_0)) {
1229 dev_err(&adapter->pdev->dev,
1230 "MAC address change is not allowed from VF %d",
1231 vf->pci_func);
1232 return -EINVAL;
1233 }
1234
1235 if (vp->vlan_mode == QLC_PVID_MODE) { 1268 if (vp->vlan_mode == QLC_PVID_MODE) {
1236 op = cmd->req.arg[1] & 0x7; 1269 op = cmd->req.arg[1] & 0x7;
1237 cmd->req.arg[1] &= ~0x7; 1270 cmd->req.arg[1] &= ~0x7;
@@ -1815,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1815 return 0; 1848 return 0;
1816} 1849}
1817 1850
1818int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate) 1851int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
1852 int min_tx_rate, int max_tx_rate)
1819{ 1853{
1820 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1854 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1821 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 1855 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1830,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
1830 if (vf >= sriov->num_vfs) 1864 if (vf >= sriov->num_vfs)
1831 return -EINVAL; 1865 return -EINVAL;
1832 1866
1833 if (tx_rate >= 10000 || tx_rate < 100) { 1867 vf_info = &sriov->vf_info[vf];
1868 vp = vf_info->vp;
1869 vpid = vp->handle;
1870
1871 if (!min_tx_rate)
1872 min_tx_rate = QLC_VF_MIN_TX_RATE;
1873
1874 if (max_tx_rate &&
1875 (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
1834 netdev_err(netdev, 1876 netdev_err(netdev,
1835 "Invalid Tx rate, allowed range is [%d - %d]", 1877 "Invalid max Tx rate, allowed range is [%d - %d]",
1836 QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE); 1878 min_tx_rate, QLC_VF_MAX_TX_RATE);
1837 return -EINVAL; 1879 return -EINVAL;
1838 } 1880 }
1839 1881
1840 if (tx_rate == 0) 1882 if (!max_tx_rate)
1841 tx_rate = 10000; 1883 max_tx_rate = 10000;
1842 1884
1843 vf_info = &sriov->vf_info[vf]; 1885 if (min_tx_rate &&
1844 vp = vf_info->vp; 1886 (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
1845 vpid = vp->handle; 1887 netdev_err(netdev,
1888 "Invalid min Tx rate, allowed range is [%d - %d]",
1889 QLC_VF_MIN_TX_RATE, max_tx_rate);
1890 return -EINVAL;
1891 }
1846 1892
1847 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { 1893 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
1848 if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid)) 1894 if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
1849 return -EIO; 1895 return -EIO;
1850 1896
1851 nic_info.max_tx_bw = tx_rate / 100; 1897 nic_info.max_tx_bw = max_tx_rate / 100;
1898 nic_info.min_tx_bw = min_tx_rate / 100;
1852 nic_info.bit_offsets = BIT_0; 1899 nic_info.bit_offsets = BIT_0;
1853 1900
1854 if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid)) 1901 if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
1855 return -EIO; 1902 return -EIO;
1856 } 1903 }
1857 1904
1858 vp->max_tx_bw = tx_rate / 100; 1905 vp->max_tx_bw = max_tx_rate / 100;
1859 netdev_info(netdev, 1906 netdev_info(netdev,
1860 "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", 1907 "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
1861 tx_rate, vp->max_tx_bw, vf); 1908 max_tx_rate, vp->max_tx_bw, vf);
1909 vp->min_tx_bw = min_tx_rate / 100;
1910 netdev_info(netdev,
1911 "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
1912 min_tx_rate, vp->min_tx_bw, vf);
1862 return 0; 1913 return 0;
1863} 1914}
1864 1915
@@ -1957,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1957 ivi->qos = vp->qos; 2008 ivi->qos = vp->qos;
1958 ivi->spoofchk = vp->spoofchk; 2009 ivi->spoofchk = vp->spoofchk;
1959 if (vp->max_tx_bw == MAX_BW) 2010 if (vp->max_tx_bw == MAX_BW)
1960 ivi->tx_rate = 0; 2011 ivi->max_tx_rate = 0;
2012 else
2013 ivi->max_tx_rate = vp->max_tx_bw * 100;
2014 if (vp->min_tx_bw == MIN_BW)
2015 ivi->min_tx_rate = 0;
1961 else 2016 else
1962 ivi->tx_rate = vp->max_tx_bw * 100; 2017 ivi->min_tx_rate = vp->min_tx_bw * 100;
1963 2018
1964 ivi->vf = vf; 2019 ivi->vf = vf;
1965 return 0; 2020 return 0;
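The set_vf_tx_rate rework above follows the net-next move to a min/max rate pair in the VF rate API. A hypothetical helper restating the validation rules the new code enforces, where 100 and 10000 stand in for QLC_VF_MIN_TX_RATE and the 10 Gb line rate:

#include <linux/errno.h>

static int validate_vf_rates(int min_tx_rate, int max_tx_rate)
{
	if (!min_tx_rate)
		min_tx_rate = 100;		/* QLC_VF_MIN_TX_RATE */

	if (max_tx_rate &&
	    (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate))
		return -EINVAL;

	if (!max_tx_rate)
		max_tx_rate = 10000;		/* 0 means unlimited */

	if (min_tx_rate > max_tx_rate || min_tx_rate < 100)
		return -EINVAL;

	return 0;
}
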
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index cd346e27f2e1..f5786d5792df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -19,6 +19,10 @@
19#include <linux/sysfs.h> 19#include <linux/sysfs.h>
20#include <linux/aer.h> 20#include <linux/aer.h>
21#include <linux/log2.h> 21#include <linux/log2.h>
22#ifdef CONFIG_QLCNIC_HWMON
23#include <linux/hwmon.h>
24#include <linux/hwmon-sysfs.h>
25#endif
22 26
23#define QLC_STATUS_UNSUPPORTED_CMD -2 27#define QLC_STATUS_UNSUPPORTED_CMD -2
24 28
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
358 if (adapter->npars[i].pci_func == pci_func) 362 if (adapter->npars[i].pci_func == pci_func)
359 return i; 363 return i;
360 } 364 }
365
366 dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
361 return -EINVAL; 367 return -EINVAL;
362} 368}
363 369
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
1243 .write = qlcnic_83xx_sysfs_flash_write_handler, 1249 .write = qlcnic_83xx_sysfs_flash_write_handler,
1244}; 1250};
1245 1251
1252#ifdef CONFIG_QLCNIC_HWMON
1253
1254static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
1255 struct device_attribute *dev_attr,
1256 char *buf)
1257{
1258 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
1259 unsigned int temperature = 0, value = 0;
1260
1261 if (qlcnic_83xx_check(adapter))
1262 value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
1263 else if (qlcnic_82xx_check(adapter))
1264 value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
1265
1266 temperature = qlcnic_get_temp_val(value);
1267	/* display millidegree Celsius */
1268 temperature *= 1000;
1269 return sprintf(buf, "%u\n", temperature);
1270}
1271
1272/* hwmon-sysfs attributes */
1273static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
1274 qlcnic_hwmon_show_temp, NULL, 1);
1275
1276static struct attribute *qlcnic_hwmon_attrs[] = {
1277 &sensor_dev_attr_temp1_input.dev_attr.attr,
1278 NULL
1279};
1280
1281ATTRIBUTE_GROUPS(qlcnic_hwmon);
1282
1283void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
1284{
1285 struct device *dev = &adapter->pdev->dev;
1286 struct device *hwmon_dev;
1287
1288 /* Skip hwmon registration for a VF device */
1289 if (qlcnic_sriov_vf_check(adapter)) {
1290 adapter->ahw->hwmon_dev = NULL;
1291 return;
1292 }
1293 hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
1294 adapter,
1295 qlcnic_hwmon_groups);
1296 if (IS_ERR(hwmon_dev)) {
1297 dev_err(dev, "Cannot register with hwmon, err=%ld\n",
1298 PTR_ERR(hwmon_dev));
1299 hwmon_dev = NULL;
1300 }
1301 adapter->ahw->hwmon_dev = hwmon_dev;
1302}
1303
1304void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
1305{
1306 struct device *hwmon_dev = adapter->ahw->hwmon_dev;
1307 if (hwmon_dev) {
1308 hwmon_device_unregister(hwmon_dev);
1309 adapter->ahw->hwmon_dev = NULL;
1310 }
1311}
1312#endif
1313
1246void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) 1314void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
1247{ 1315{
1248 struct device *dev = &adapter->pdev->dev; 1316 struct device *dev = &adapter->pdev->dev;
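The new hwmon hookup uses the groups-based registration API, which lets the hwmon core create and tear down the sysfs attributes itself. A minimal sketch of the same shape; the demo naming and read_temp_mC() helper are hypothetical:

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static ssize_t demo_temp_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* read_temp_mC() is a hypothetical helper, millidegrees Celsius */
	return sprintf(buf, "%d\n", read_temp_mC(dev_get_drvdata(dev)));
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_temp_show, NULL, 1);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);		/* generates demo_groups */

static struct device *demo_hwmon_register(struct device *parent,
					  void *drvdata)
{
	return hwmon_device_register_with_groups(parent, "demo", drvdata,
						 demo_groups);
}
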
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 0a1d76acab81..b40050e03a56 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
3595 } 3595 }
3596 return status; 3596 return status;
3597err_irq: 3597err_irq:
3598 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); 3598 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3599 ql_free_irq(qdev); 3599 ql_free_irq(qdev);
3600 return status; 3600 return status;
3601} 3601}
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
4770 ndev->irq = pdev->irq; 4770 ndev->irq = pdev->irq;
4771 4771
4772 ndev->netdev_ops = &qlge_netdev_ops; 4772 ndev->netdev_ops = &qlge_netdev_ops;
4773 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); 4773 ndev->ethtool_ops = &qlge_ethtool_ops;
4774 ndev->watchdog_timeo = 10 * HZ; 4774 ndev->watchdog_timeo = 10 * HZ;
4775 4775
4776 err = register_netdev(ndev); 4776 err = register_netdev(ndev);
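The SET_ETHTOOL_OPS() conversion here repeats across many drivers below (r8169, sh_eth, sxgbe, sis190, smc91c92, sfc, stmmac, tehuti): the macro was being removed from net-next because it only hid a plain member assignment. Roughly what it expanded to in older <linux/ethtool.h>:

#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
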
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index aa1c079f231d..be425ad5e824 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7125 for (i = 0; i < ETH_ALEN; i++) 7125 for (i = 0; i < ETH_ALEN; i++)
7126 dev->dev_addr[i] = RTL_R8(MAC0 + i); 7126 dev->dev_addr[i] = RTL_R8(MAC0 + i);
7127 7127
7128 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); 7128 dev->ethtool_ops = &rtl8169_ethtool_ops;
7129 dev->watchdog_timeo = RTL8169_TX_TIMEOUT; 7129 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
7130 7130
7131 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 7131 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a9509ccd33b..7622213beef1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
307}; 307};
308 308
309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { 309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310 [EDMR] = 0x0000,
311 [EDTRR] = 0x0004,
312 [EDRRR] = 0x0008,
313 [TDLAR] = 0x000c,
314 [RDLAR] = 0x0010,
315 [EESR] = 0x0014,
316 [EESIPR] = 0x0018,
317 [TRSCER] = 0x001c,
318 [RMFCR] = 0x0020,
319 [TFTR] = 0x0024,
320 [FDR] = 0x0028,
321 [RMCR] = 0x002c,
322 [EDOCR] = 0x0030,
323 [FCFTR] = 0x0034,
324 [RPADIR] = 0x0038,
325 [TRIMD] = 0x003c,
326 [RBWAR] = 0x0040,
327 [RDFAR] = 0x0044,
328 [TBRAR] = 0x004c,
329 [TDFAR] = 0x0050,
330
310 [ECMR] = 0x0160, 331 [ECMR] = 0x0160,
311 [ECSR] = 0x0164, 332 [ECSR] = 0x0164,
312 [ECSIPR] = 0x0168, 333 [ECSIPR] = 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
546 .register_type = SH_ETH_REG_FAST_SH4, 567 .register_type = SH_ETH_REG_FAST_SH4,
547 568
548 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 569 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
549 .rmcr_value = RMCR_RNC,
550 570
551 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 571 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
552 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 572 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
624 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 644 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
625 EESR_TDE | EESR_ECI, 645 EESR_TDE | EESR_ECI,
626 .fdr_value = 0x0000072f, 646 .fdr_value = 0x0000072f,
627 .rmcr_value = RMCR_RNC,
628 647
629 .irq_flags = IRQF_SHARED, 648 .irq_flags = IRQF_SHARED,
630 .apr = 1, 649 .apr = 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
752 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 771 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
753 EESR_TDE | EESR_ECI, 772 EESR_TDE | EESR_ECI,
754 .fdr_value = 0x0000070f, 773 .fdr_value = 0x0000070f,
755 .rmcr_value = RMCR_RNC,
756 774
757 .apr = 1, 775 .apr = 1,
758 .mpr = 1, 776 .mpr = 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
784 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 802 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
785 EESR_TDE | EESR_ECI, 803 EESR_TDE | EESR_ECI,
786 .fdr_value = 0x0000070f, 804 .fdr_value = 0x0000070f,
787 .rmcr_value = RMCR_RNC,
788 805
789 .no_psr = 1, 806 .no_psr = 1,
790 .apr = 1, 807 .apr = 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
833 if (!cd->fdr_value) 850 if (!cd->fdr_value)
834 cd->fdr_value = DEFAULT_FDR_INIT; 851 cd->fdr_value = DEFAULT_FDR_INIT;
835 852
836 if (!cd->rmcr_value)
837 cd->rmcr_value = DEFAULT_RMCR_VALUE;
838
839 if (!cd->tx_check) 853 if (!cd->tx_check)
840 cd->tx_check = DEFAULT_TX_CHECK; 854 cd->tx_check = DEFAULT_TX_CHECK;
841 855
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1287 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 1301 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1288 sh_eth_write(ndev, 0, TFTR); 1302 sh_eth_write(ndev, 0, TFTR);
1289 1303
1290 /* Frame recv control */ 1304 /* Frame recv control (enable multiple-packets per rx irq) */
1291 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 1305 sh_eth_write(ndev, RMCR_RNC, RMCR);
1292 1306
1293 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1307 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1294 1308
@@ -1385,7 +1399,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1385 int entry = mdp->cur_rx % mdp->num_rx_ring; 1399 int entry = mdp->cur_rx % mdp->num_rx_ring;
1386 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; 1400 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1387 struct sk_buff *skb; 1401 struct sk_buff *skb;
1388 int exceeded = 0;
1389 u16 pkt_len = 0; 1402 u16 pkt_len = 0;
1390 u32 desc_status; 1403 u32 desc_status;
1391 1404
@@ -1397,10 +1410,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1397 if (--boguscnt < 0) 1410 if (--boguscnt < 0)
1398 break; 1411 break;
1399 1412
1400 if (*quota <= 0) { 1413 if (*quota <= 0)
1401 exceeded = 1;
1402 break; 1414 break;
1403 } 1415
1404 (*quota)--; 1416 (*quota)--;
1405 1417
1406 if (!(desc_status & RDFEND)) 1418 if (!(desc_status & RDFEND))
@@ -1448,7 +1460,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1448 ndev->stats.rx_packets++; 1460 ndev->stats.rx_packets++;
1449 ndev->stats.rx_bytes += pkt_len; 1461 ndev->stats.rx_bytes += pkt_len;
1450 } 1462 }
1451 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1452 entry = (++mdp->cur_rx) % mdp->num_rx_ring; 1463 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1453 rxdesc = &mdp->rx_ring[entry]; 1464 rxdesc = &mdp->rx_ring[entry];
1454 } 1465 }
@@ -1494,7 +1505,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1494 sh_eth_write(ndev, EDRRR_R, EDRRR); 1505 sh_eth_write(ndev, EDRRR_R, EDRRR);
1495 } 1506 }
1496 1507
1497 return exceeded; 1508 return *quota <= 0;
1498} 1509}
1499 1510
1500static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1511static void sh_eth_rcv_snd_disable(struct net_device *ndev)
@@ -2627,8 +2638,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
2627 pdev->name, pdev->id); 2638 pdev->name, pdev->id);
2628 2639
2629 /* PHY IRQ */ 2640 /* PHY IRQ */
2630 mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR, 2641 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2631 GFP_KERNEL); 2642 GFP_KERNEL);
2632 if (!mdp->mii_bus->irq) { 2643 if (!mdp->mii_bus->irq) {
2633 ret = -ENOMEM; 2644 ret = -ENOMEM;
2634 goto out_free_bus; 2645 goto out_free_bus;
@@ -2843,7 +2854,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2843 ndev->netdev_ops = &sh_eth_netdev_ops_tsu; 2854 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2844 else 2855 else
2845 ndev->netdev_ops = &sh_eth_netdev_ops; 2856 ndev->netdev_ops = &sh_eth_netdev_ops;
2846 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 2857 ndev->ethtool_ops = &sh_eth_ethtool_ops;
2847 ndev->watchdog_timeo = TX_TIMEOUT; 2858 ndev->watchdog_timeo = TX_TIMEOUT;
2848 2859
2849 /* debug message level */ 2860 /* debug message level */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37cd5fec..b37c427144ee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
319enum RMCR_BIT { 319enum RMCR_BIT {
320 RMCR_RNC = 0x00000001, 320 RMCR_RNC = 0x00000001,
321}; 321};
322#define DEFAULT_RMCR_VALUE 0x00000000
323 322
324/* ECMR */ 323/* ECMR */
325enum FELIC_MODE_BIT { 324enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
466 unsigned long fdr_value; 465 unsigned long fdr_value;
467 unsigned long fcftr_value; 466 unsigned long fcftr_value;
468 unsigned long rpadir_value; 467 unsigned long rpadir_value;
469 unsigned long rmcr_value;
470 468
471 /* interrupt checking mask */ 469 /* interrupt checking mask */
472 unsigned long tx_check; 470 unsigned long tx_check;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 0415fa50eeb7..c0981ae45874 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
520 520
521void sxgbe_set_ethtool_ops(struct net_device *netdev) 521void sxgbe_set_ethtool_ops(struct net_device *netdev)
522{ 522{
523 SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops); 523 netdev->ethtool_ops = &sxgbe_ethtool_ops;
524} 524}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 82a9a983869f..698494481d18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -425,8 +425,8 @@ dmamem_err:
425 * @rx_rsize: ring size 425 * @rx_rsize: ring size
426 * Description: this function frees the DMA RX descriptors	426 * Description: this function frees the DMA RX descriptors
427 */ 427 */
428void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring, 428static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
429 int rx_rsize) 429 int rx_rsize)
430{ 430{
431 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 431 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
432 rx_ring->dma_rx, rx_ring->dma_rx_phy); 432 rx_ring->dma_rx, rx_ring->dma_rx_phy);
@@ -519,8 +519,8 @@ error:
519 * @tx_rsize: ring size 519 * @tx_rsize: ring size
520 * Description: this function frees the DMA TX descriptors	520 * Description: this function frees the DMA TX descriptors
521 */ 521 */
522void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring, 522static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
523 int tx_rsize) 523 int tx_rsize)
524{ 524{
525 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 525 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
526 tx_ring->dma_tx, tx_ring->dma_tx_phy); 526 tx_ring->dma_tx, tx_ring->dma_tx_phy);
@@ -1221,11 +1221,10 @@ static int sxgbe_release(struct net_device *dev)
1221 1221
1222 return 0; 1222 return 0;
1223} 1223}
1224
1225/* Prepare first Tx descriptor for doing TSO operation */ 1224/* Prepare first Tx descriptor for doing TSO operation */
1226void sxgbe_tso_prepare(struct sxgbe_priv_data *priv, 1225static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
1227 struct sxgbe_tx_norm_desc *first_desc, 1226 struct sxgbe_tx_norm_desc *first_desc,
1228 struct sk_buff *skb) 1227 struct sk_buff *skb)
1229{ 1228{
1230 unsigned int total_hdr_len, tcp_hdr_len; 1229 unsigned int total_hdr_len, tcp_hdr_len;
1231 1230
@@ -1914,40 +1913,6 @@ static void sxgbe_set_rx_mode(struct net_device *dev)
1914 readl(ioaddr + SXGBE_HASH_LOW)); 1913 readl(ioaddr + SXGBE_HASH_LOW));
1915} 1914}
1916 1915
1917/**
1918 * sxgbe_config - entry point for changing configuration mode passed on by
1919 * ifconfig
1920 * @dev : pointer to the device structure
1921 * @map : pointer to the device mapping structure
1922 * Description:
1923 * This function is a driver entry point which gets called by the kernel
1924 * whenever some device configuration is changed.
1925 * Return value:
1926 * This function returns 0 if success and appropriate error otherwise.
1927 */
1928static int sxgbe_config(struct net_device *dev, struct ifmap *map)
1929{
1930 struct sxgbe_priv_data *priv = netdev_priv(dev);
1931
1932 /* Can't act on a running interface */
1933 if (dev->flags & IFF_UP)
1934 return -EBUSY;
1935
1936 /* Don't allow changing the I/O address */
1937 if (map->base_addr != (unsigned long)priv->ioaddr) {
1938 netdev_warn(dev, "can't change I/O address\n");
1939 return -EOPNOTSUPP;
1940 }
1941
1942 /* Don't allow changing the IRQ */
1943 if (map->irq != priv->irq) {
1944 netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
1945 return -EOPNOTSUPP;
1946 }
1947
1948 return 0;
1949}
1950
1951#ifdef CONFIG_NET_POLL_CONTROLLER 1916#ifdef CONFIG_NET_POLL_CONTROLLER
1952/** 1917/**
1953 * sxgbe_poll_controller - entry point for polling receive by device 1918 * sxgbe_poll_controller - entry point for polling receive by device
@@ -2009,7 +1974,6 @@ static const struct net_device_ops sxgbe_netdev_ops = {
2009 .ndo_set_rx_mode = sxgbe_set_rx_mode, 1974 .ndo_set_rx_mode = sxgbe_set_rx_mode,
2010 .ndo_tx_timeout = sxgbe_tx_timeout, 1975 .ndo_tx_timeout = sxgbe_tx_timeout,
2011 .ndo_do_ioctl = sxgbe_ioctl, 1976 .ndo_do_ioctl = sxgbe_ioctl,
2012 .ndo_set_config = sxgbe_config,
2013#ifdef CONFIG_NET_POLL_CONTROLLER 1977#ifdef CONFIG_NET_POLL_CONTROLLER
2014 .ndo_poll_controller = sxgbe_poll_controller, 1978 .ndo_poll_controller = sxgbe_poll_controller,
2015#endif 1979#endif
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 56f8bf5a3f1b..81437d91df99 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -188,7 +188,6 @@
188 188
189/* L3/L4 function registers */ 189/* L3/L4 function registers */
190#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00 190#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
191#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
192#define SXGBE_CORE_L34_DATA_REG 0x0C04 191#define SXGBE_CORE_L34_DATA_REG 0x0C04
193 192
194/* ARP registers */ 193/* ARP registers */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 63d595fd3cc5..1e274045970f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
2248 } else { 2248 } else {
2249 net_dev->netdev_ops = &efx_farch_netdev_ops; 2249 net_dev->netdev_ops = &efx_farch_netdev_ops;
2250 } 2250 }
2251 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2251 net_dev->ethtool_ops = &efx_ethtool_ops;
2252 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2252 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2253 2253
2254 rtnl_lock(); 2254 rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 0de8b07c24c2..74739c4b9997 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1033,7 +1033,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
1033 0 : ARRAY_SIZE(efx->rx_indir_table)); 1033 0 : ARRAY_SIZE(efx->rx_indir_table));
1034} 1034}
1035 1035
1036static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) 1036static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
1037{ 1037{
1038 struct efx_nic *efx = netdev_priv(net_dev); 1038 struct efx_nic *efx = netdev_priv(net_dev);
1039 1039
@@ -1041,8 +1041,8 @@ static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
1041 return 0; 1041 return 0;
1042} 1042}
1043 1043
1044static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, 1044static int efx_ethtool_set_rxfh(struct net_device *net_dev,
1045 const u32 *indir) 1045 const u32 *indir, const u8 *key)
1046{ 1046{
1047 struct efx_nic *efx = netdev_priv(net_dev); 1047 struct efx_nic *efx = netdev_priv(net_dev);
1048 1048
@@ -1125,8 +1125,8 @@ const struct ethtool_ops efx_ethtool_ops = {
1125 .get_rxnfc = efx_ethtool_get_rxnfc, 1125 .get_rxnfc = efx_ethtool_get_rxnfc,
1126 .set_rxnfc = efx_ethtool_set_rxnfc, 1126 .set_rxnfc = efx_ethtool_set_rxnfc,
1127 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1127 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
1128 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1128 .get_rxfh = efx_ethtool_get_rxfh,
1129 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1129 .set_rxfh = efx_ethtool_set_rxfh,
1130 .get_ts_info = efx_ethtool_get_ts_info, 1130 .get_ts_info = efx_ethtool_get_ts_info,
1131 .get_module_info = efx_ethtool_get_module_info, 1131 .get_module_info = efx_ethtool_get_module_info,
1132 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1132 .get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4d3f119b67b3..afb94aa2c15e 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,10 +66,17 @@
66#define EFX_USE_QWORD_IO 1 66#define EFX_USE_QWORD_IO 1
67#endif 67#endif
68 68
69/* Hardware issue requires that only 64-bit naturally aligned writes
70 * are seen by hardware. It's not strictly necessary to restrict to
71 * x86_64 arch, but done for safety since unusual write combining behaviour
72 * can break PIO.
73 */
74#ifdef CONFIG_X86_64
69/* PIO is a win only if write-combining is possible */ 75/* PIO is a win only if write-combining is possible */
70#ifdef ARCH_HAS_IOREMAP_WC 76#ifdef ARCH_HAS_IOREMAP_WC
71#define EFX_USE_PIO 1 77#define EFX_USE_PIO 1
72#endif 78#endif
79#endif
73 80
74#ifdef EFX_USE_QWORD_IO 81#ifdef EFX_USE_QWORD_IO
75static inline void _efx_writeq(struct efx_nic *efx, __le64 value, 82static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9a9205e77896..43d2e64546ed 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
1633 1633
1634 ivi->vf = vf_i; 1634 ivi->vf = vf_i;
1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr); 1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr);
1636 ivi->tx_rate = 0; 1636 ivi->max_tx_rate = 0;
1637 ivi->min_tx_rate = 0;
1637 tci = ntohs(vf->addr.tci); 1638 tci = ntohs(vf->addr.tci);
1638 ivi->vlan = tci & VLAN_VID_MASK; 1639 ivi->vlan = tci & VLAN_VID_MASK;
1639 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; 1640 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fa9475300411..ede8dcca0ff3 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
189 u8 buf[L1_CACHE_BYTES]; 189 u8 buf[L1_CACHE_BYTES];
190}; 190};
191 191
192/* Copy in explicit 64-bit writes. */
193static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
194{
195 u64 *src64 = src;
196 u64 __iomem *dest64 = dest;
197 size_t l64 = len / 8;
198 size_t i;
199
200 for (i = 0; i < l64; i++)
201 writeq(src64[i], &dest64[i]);
202}
203
192/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. 204/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
193 * Advances piobuf pointer. Leaves additional data in the copy buffer. 205 * Advances piobuf pointer. Leaves additional data in the copy buffer.
194 */ 206 */
@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
198{ 210{
199 int block_len = len & ~(sizeof(copy_buf->buf) - 1); 211 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
200 212
201 memcpy_toio(*piobuf, data, block_len); 213 efx_memcpy_64(*piobuf, data, block_len);
202 *piobuf += block_len; 214 *piobuf += block_len;
203 len -= block_len; 215 len -= block_len;
204 216
@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
230 if (copy_buf->used < sizeof(copy_buf->buf)) 242 if (copy_buf->used < sizeof(copy_buf->buf))
231 return; 243 return;
232 244
233 memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 245 efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
234 *piobuf += sizeof(copy_buf->buf); 246 *piobuf += sizeof(copy_buf->buf);
235 data += copy_to_buf; 247 data += copy_to_buf;
236 len -= copy_to_buf; 248 len -= copy_to_buf;
@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
245{ 257{
246 /* if there's anything in it, write the whole buffer, including junk */ 258 /* if there's anything in it, write the whole buffer, including junk */
247 if (copy_buf->used) 259 if (copy_buf->used)
248 memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 260 efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
249} 261}
250 262
251/* Traverse skb structure and copy fragments in to PIO buffer. 263/* Traverse skb structure and copy fragments in to PIO buffer.
@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
304 */ 316 */
305 BUILD_BUG_ON(L1_CACHE_BYTES > 317 BUILD_BUG_ON(L1_CACHE_BYTES >
306 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 318 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
307 memcpy_toio(tx_queue->piobuf, skb->data, 319 efx_memcpy_64(tx_queue->piobuf, skb->data,
308 ALIGN(skb->len, L1_CACHE_BYTES)); 320 ALIGN(skb->len, L1_CACHE_BYTES));
309 } 321 }
310 322
311 EFX_POPULATE_QWORD_5(buffer->option, 323 EFX_POPULATE_QWORD_5(buffer->option,
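The efx_memcpy_64() helper added above, together with the CONFIG_X86_64 gate in io.h, pins down the access width: memcpy_toio() may legally fall back to byte or word stores, which the write-combined PIO window cannot tolerate. An illustrative restatement with the invariants made explicit; it assumes callers pass 8-byte-multiple lengths, which the cache-line-sized copy buffer guarantees:

#include <linux/io.h>
#include <linux/kernel.h>

static void pio_copy64(void __iomem *dest, const void *src, size_t len)
{
	const u64 *s = src;
	u64 __iomem *d = dest;
	size_t i;

	/* the WC window only tolerates naturally aligned 64-bit stores */
	WARN_ON_ONCE((len & 7) || !IS_ALIGNED((unsigned long)dest, 8));
	for (i = 0; i < len / 8; i++)
		writeq(s[i], &d[i]);
}
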
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index acbbe48a519c..a86339903b9b 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
1877 1877
1878 dev->netdev_ops = &sis190_netdev_ops; 1878 dev->netdev_ops = &sis190_netdev_ops;
1879 1879
1880 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); 1880 dev->ethtool_ops = &sis190_ethtool_ops;
1881 dev->watchdog_timeo = SIS190_TX_TIMEOUT; 1881 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1882 1882
1883 spin_lock_init(&tp->lock); 1883 spin_lock_init(&tp->lock);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index c7a4868571f9..6b33127ab352 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
318 318
319 /* The SMC91c92-specific entries in the device structure. */ 319 /* The SMC91c92-specific entries in the device structure. */
320 dev->netdev_ops = &smc_netdev_ops; 320 dev->netdev_ops = &smc_netdev_ops;
321 SET_ETHTOOL_OPS(dev, &ethtool_ops); 321 dev->ethtool_ops = &ethtool_ops;
322 dev->watchdog_timeo = TX_TIMEOUT; 322 dev->watchdog_timeo = TX_TIMEOUT;
323 323
324 smc->mii_if.dev = dev; 324 smc->mii_if.dev = dev;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151da40d..5e13fa5524ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2477 goto out_disable_resources; 2477 goto out_disable_resources;
2478 } 2478 }
2479 2479
2480 netif_carrier_off(dev);
2481
2480 retval = register_netdev(dev); 2482 retval = register_netdev(dev);
2481 if (retval) { 2483 if (retval) {
2482 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2484 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
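The smsc911x one-liner above fixes an ordering hazard common to PHY-driven NICs: if the carrier is left in its default state, userspace can observe a phantom link-up in the window between register_netdev() and the first real PHY status report. The shape of the fix, in isolation:

#include <linux/netdevice.h>

static int demo_register(struct net_device *ndev)
{
	netif_carrier_off(ndev);	/* no carrier until the PHY says so */
	return register_netdev(ndev);
}
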
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5f9cb85c8ef..c62e67f3c2f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -322,9 +322,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
322 return -EBUSY; 322 return -EBUSY;
323 } 323 }
324 cmd->transceiver = XCVR_INTERNAL; 324 cmd->transceiver = XCVR_INTERNAL;
325 spin_lock_irq(&priv->lock);
326 rc = phy_ethtool_gset(phy, cmd); 325 rc = phy_ethtool_gset(phy, cmd);
327 spin_unlock_irq(&priv->lock);
328 return rc; 326 return rc;
329} 327}
330 328
@@ -431,8 +429,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
431 if (priv->pcs) /* FIXME */ 429 if (priv->pcs) /* FIXME */
432 return; 430 return;
433 431
434 spin_lock(&priv->lock);
435
436 pause->rx_pause = 0; 432 pause->rx_pause = 0;
437 pause->tx_pause = 0; 433 pause->tx_pause = 0;
438 pause->autoneg = priv->phydev->autoneg; 434 pause->autoneg = priv->phydev->autoneg;
@@ -442,7 +438,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
442 if (priv->flow_ctrl & FLOW_TX) 438 if (priv->flow_ctrl & FLOW_TX)
443 pause->tx_pause = 1; 439 pause->tx_pause = 1;
444 440
445 spin_unlock(&priv->lock);
446} 441}
447 442
448static int 443static int
@@ -457,8 +452,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
457 if (priv->pcs) /* FIXME */ 452 if (priv->pcs) /* FIXME */
458 return -EOPNOTSUPP; 453 return -EOPNOTSUPP;
459 454
460 spin_lock(&priv->lock);
461
462 if (pause->rx_pause) 455 if (pause->rx_pause)
463 new_pause |= FLOW_RX; 456 new_pause |= FLOW_RX;
464 if (pause->tx_pause) 457 if (pause->tx_pause)
@@ -473,7 +466,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
473 } else 466 } else
474 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex, 467 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
475 priv->flow_ctrl, priv->pause); 468 priv->flow_ctrl, priv->pause);
476 spin_unlock(&priv->lock);
477 return ret; 469 return ret;
478} 470}
479 471
@@ -784,5 +776,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
784 776
785void stmmac_set_ethtool_ops(struct net_device *netdev) 777void stmmac_set_ethtool_ops(struct net_device *netdev)
786{ 778{
787 SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops); 779 netdev->ethtool_ops = &stmmac_ethtool_ops;
788} 780}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0f4841d2e8dc..057a1208e594 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1753,7 +1753,7 @@ static int stmmac_open(struct net_device *dev)
1753 } 1753 }
1754 1754
1755 /* Request the IRQ lines */ 1755 /* Request the IRQ lines */
1756 if (priv->lpi_irq != -ENXIO) { 1756 if (priv->lpi_irq > 0) {
1757 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, 1757 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1758 dev->name, dev); 1758 dev->name, dev);
1759 if (unlikely(ret < 0)) { 1759 if (unlikely(ret < 0)) {
@@ -1813,7 +1813,7 @@ static int stmmac_release(struct net_device *dev)
1813 free_irq(dev->irq, dev); 1813 free_irq(dev->irq, dev);
1814 if (priv->wol_irq != dev->irq) 1814 if (priv->wol_irq != dev->irq)
1815 free_irq(priv->wol_irq, dev); 1815 free_irq(priv->wol_irq, dev);
1816 if (priv->lpi_irq != -ENXIO) 1816 if (priv->lpi_irq > 0)
1817 free_irq(priv->lpi_irq, dev); 1817 free_irq(priv->lpi_irq, dev);
1818 1818
1819 /* Stop TX/RX DMA and clear the descriptors */ 1819 /* Stop TX/RX DMA and clear the descriptors */
@@ -2212,27 +2212,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
2212 stmmac_tx_err(priv); 2212 stmmac_tx_err(priv);
2213} 2213}
2214 2214
2215/* Configuration changes (passed on by ifconfig) */
2216static int stmmac_config(struct net_device *dev, struct ifmap *map)
2217{
2218 if (dev->flags & IFF_UP) /* can't act on a running interface */
2219 return -EBUSY;
2220
2221 /* Don't allow changing the I/O address */
2222 if (map->base_addr != dev->base_addr) {
2223 pr_warn("%s: can't change I/O address\n", dev->name);
2224 return -EOPNOTSUPP;
2225 }
2226
2227 /* Don't allow changing the IRQ */
2228 if (map->irq != dev->irq) {
2229 pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
2230 return -EOPNOTSUPP;
2231 }
2232
2233 return 0;
2234}
2235
2236/** 2215/**
2237 * stmmac_set_rx_mode - entry point for multicast addressing 2216 * stmmac_set_rx_mode - entry point for multicast addressing
2238 * @dev : pointer to the device structure 2217 * @dev : pointer to the device structure
@@ -2598,7 +2577,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
2598 .ndo_set_rx_mode = stmmac_set_rx_mode, 2577 .ndo_set_rx_mode = stmmac_set_rx_mode,
2599 .ndo_tx_timeout = stmmac_tx_timeout, 2578 .ndo_tx_timeout = stmmac_tx_timeout,
2600 .ndo_do_ioctl = stmmac_ioctl, 2579 .ndo_do_ioctl = stmmac_ioctl,
2601 .ndo_set_config = stmmac_config,
2602#ifdef CONFIG_NET_POLL_CONTROLLER 2580#ifdef CONFIG_NET_POLL_CONTROLLER
2603 .ndo_poll_controller = stmmac_poll_controller, 2581 .ndo_poll_controller = stmmac_poll_controller,
2604#endif 2582#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a468eb107823..a5b1e1b776fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
205 if (new_bus == NULL) 205 if (new_bus == NULL)
206 return -ENOMEM; 206 return -ENOMEM;
207 207
208 if (mdio_bus_data->irqs) 208 if (mdio_bus_data->irqs) {
209 irqlist = mdio_bus_data->irqs; 209 irqlist = mdio_bus_data->irqs;
210 else 210 } else {
211 for (addr = 0; addr < PHY_MAX_ADDR; addr++)
212 priv->mii_irq[addr] = PHY_POLL;
211 irqlist = priv->mii_irq; 213 irqlist = priv->mii_irq;
214 }
212 215
213#ifdef CONFIG_OF 216#ifdef CONFIG_OF
214 if (priv->device->of_node) 217 if (priv->device->of_node)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 46aef5108bea..ea7a65be1f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -237,10 +237,12 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
237 237
238 /* Get the MAC information */ 238 /* Get the MAC information */
239 priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); 239 priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
240 if (priv->dev->irq == -ENXIO) { 240 if (priv->dev->irq < 0) {
241 pr_err("%s: ERROR: MAC IRQ configuration " 241 if (priv->dev->irq != -EPROBE_DEFER) {
242 "information not found\n", __func__); 242 netdev_err(priv->dev,
243 return -ENXIO; 243 "MAC IRQ configuration information not found\n");
244 }
245 return priv->dev->irq;
244 } 246 }
245 247
246 /* 248 /*
@@ -252,10 +254,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
252 * so the driver will continue to use the mac irq (ndev->irq) 254 * so the driver will continue to use the mac irq (ndev->irq)
253 */ 255 */
254 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); 256 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
255 if (priv->wol_irq == -ENXIO) 257 if (priv->wol_irq < 0) {
258 if (priv->wol_irq == -EPROBE_DEFER)
259 return -EPROBE_DEFER;
256 priv->wol_irq = priv->dev->irq; 260 priv->wol_irq = priv->dev->irq;
261 }
257 262
258 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); 263 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
264 if (priv->lpi_irq == -EPROBE_DEFER)
265 return -EPROBE_DEFER;
259 266
260 platform_set_drvdata(pdev, priv->dev); 267 platform_set_drvdata(pdev, priv->dev);
261 268
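The stmmac_platform changes above replace the "== -ENXIO" special case with proper errno handling: mandatory IRQ lookups must propagate any negative value (critically -EPROBE_DEFER, so the probe is retried once the interrupt provider appears), while the optional wake IRQ may fall back. A sketch reusing the driver's "macirq"/"eth_wake_irq" names:

#include <linux/platform_device.h>

static int demo_get_irqs(struct platform_device *pdev, int *wol_irq)
{
	int irq = platform_get_irq_byname(pdev, "macirq");

	if (irq < 0)
		return irq;		/* includes -EPROBE_DEFER */

	*wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (*wol_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (*wol_irq < 0)
		*wol_irq = irq;		/* optional IRQ: reuse the MAC IRQ */

	return irq;
}
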
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 2ead87759ab4..38da73a2a886 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
2413 .get_ethtool_stats = bdx_get_ethtool_stats, 2413 .get_ethtool_stats = bdx_get_ethtool_stats,
2414 }; 2414 };
2415 2415
2416 SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops); 2416 netdev->ethtool_ops = &bdx_ethtool_ops;
2417} 2417}
2418 2418
2419/** 2419/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 73f74f369437..7399a52f7c26 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -313,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
313 313
314static struct mii_bus *cpmac_mii; 314static struct mii_bus *cpmac_mii;
315 315
316static int cpmac_config(struct net_device *dev, struct ifmap *map)
317{
318 if (dev->flags & IFF_UP)
319 return -EBUSY;
320
321 /* Don't allow changing the I/O address */
322 if (map->base_addr != dev->base_addr)
323 return -EOPNOTSUPP;
324
325 /* ignore other fields */
326 return 0;
327}
328
329static void cpmac_set_multicast_list(struct net_device *dev) 316static void cpmac_set_multicast_list(struct net_device *dev)
330{ 317{
331 struct netdev_hw_addr *ha; 318 struct netdev_hw_addr *ha;
@@ -1100,7 +1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
1100 .ndo_tx_timeout = cpmac_tx_timeout, 1087 .ndo_tx_timeout = cpmac_tx_timeout,
1101 .ndo_set_rx_mode = cpmac_set_multicast_list, 1088 .ndo_set_rx_mode = cpmac_set_multicast_list,
1102 .ndo_do_ioctl = cpmac_ioctl, 1089 .ndo_do_ioctl = cpmac_ioctl,
1103 .ndo_set_config = cpmac_config,
1104 .ndo_change_mtu = eth_change_mtu, 1090 .ndo_change_mtu = eth_change_mtu,
1105 .ndo_validate_addr = eth_validate_addr, 1091 .ndo_validate_addr = eth_validate_addr,
1106 .ndo_set_mac_address = eth_mac_addr, 1092 .ndo_set_mac_address = eth_mac_addr,
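Removing cpmac_config leans on the core's default behavior: as far as I can tell, when a driver provides no .ndo_set_config, the SIOCSIFMAP ioctl simply fails with -EOPNOTSUPP, and the deleted stub never permitted a real change anyway (busy while up, base address locked, other fields ignored). Simplified view of the core-side fallback (illustrative, not the verbatim code in net/core/dev_ioctl.c):

    if (ops->ndo_set_config)
            err = ops->ndo_set_config(dev, &ifr->ifr_map);
    else
            err = -EOPNOTSUPP;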
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 148da9ae8366..aa8bf45e53dc 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -29,6 +29,8 @@
29#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) 29#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
30#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) 30#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
31 31
32#define GMII_SEL_MODE_MASK 0x3
33
32struct cpsw_phy_sel_priv { 34struct cpsw_phy_sel_priv {
33 struct device *dev; 35 struct device *dev;
34 u32 __iomem *gmii_sel; 36 u32 __iomem *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
65 break; 67 break;
66 }; 68 };
67 69
68 mask = 0x3 << (slave * 2) | BIT(slave + 6); 70 mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
69 mode <<= slave * 2; 71 mode <<= slave * 2;
70 72
71 if (priv->rmii_clock_external) { 73 if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
81 writel(reg, priv->gmii_sel); 83 writel(reg, priv->gmii_sel);
82} 84}
83 85
86static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
87 phy_interface_t phy_mode, int slave)
88{
89 u32 reg;
90 u32 mask;
91 u32 mode = 0;
92
93 reg = readl(priv->gmii_sel);
94
95 switch (phy_mode) {
96 case PHY_INTERFACE_MODE_RMII:
97 mode = AM33XX_GMII_SEL_MODE_RMII;
98 break;
99
100 case PHY_INTERFACE_MODE_RGMII:
101 case PHY_INTERFACE_MODE_RGMII_ID:
102 case PHY_INTERFACE_MODE_RGMII_RXID:
103 case PHY_INTERFACE_MODE_RGMII_TXID:
104 mode = AM33XX_GMII_SEL_MODE_RGMII;
105 break;
106
107 case PHY_INTERFACE_MODE_MII:
108 default:
109 mode = AM33XX_GMII_SEL_MODE_MII;
110 break;
111 };
112
113 switch (slave) {
114 case 0:
115 mask = GMII_SEL_MODE_MASK;
116 break;
117 case 1:
118 mask = GMII_SEL_MODE_MASK << 4;
119 mode <<= 4;
120 break;
121 default:
122 dev_err(priv->dev, "invalid slave number...\n");
123 return;
124 }
125
126 if (priv->rmii_clock_external)
127 dev_err(priv->dev, "RMII External clock is not supported\n");
128
129 reg &= ~mask;
130 reg |= mode;
131
132 writel(reg, priv->gmii_sel);
133}
134
84static struct platform_driver cpsw_phy_sel_driver; 135static struct platform_driver cpsw_phy_sel_driver;
85static int match(struct device *dev, void *data) 136static int match(struct device *dev, void *data)
86{ 137{
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
112 .compatible = "ti,am3352-cpsw-phy-sel", 163 .compatible = "ti,am3352-cpsw-phy-sel",
113 .data = &cpsw_gmii_sel_am3352, 164 .data = &cpsw_gmii_sel_am3352,
114 }, 165 },
166 {
167 .compatible = "ti,dra7xx-cpsw-phy-sel",
168 .data = &cpsw_gmii_sel_dra7xx,
169 },
170 {
171 .compatible = "ti,am43xx-cpsw-phy-sel",
172 .data = &cpsw_gmii_sel_am3352,
173 },
115 {} 174 {}
116}; 175};
117MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table); 176MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
132 return -ENOMEM; 191 return -ENOMEM;
133 } 192 }
134 193
194 priv->dev = &pdev->dev;
135 priv->cpsw_phy_sel = of_id->data; 195 priv->cpsw_phy_sel = of_id->data;
136 196
137 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel"); 197 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
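The two new compatible strings show the usual OF dispatch pattern: DRA7xx gets its own handler while AM43xx reuses the AM3352 one, and probe recovers the per-SoC function from the matched entry's .data pointer. A sketch of that lookup (the typedef and helper name are illustrative; of_match_device() and the table are real, and cpsw_phy_sel_priv is the driver's own struct):

    #include <linux/of_device.h>
    #include <linux/phy.h>

    typedef void (*phy_sel_fn)(struct cpsw_phy_sel_priv *priv,
                               phy_interface_t phy_mode, int slave);

    static phy_sel_fn example_lookup(struct platform_device *pdev)
    {
            const struct of_device_id *of_id =
                    of_match_device(cpsw_phy_sel_id_table, &pdev->dev);

            return of_id ? (phy_sel_fn)of_id->data : NULL;
    }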
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c331b7ebc812..ff380dac6629 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -143,13 +143,13 @@ do { \
143 u32 i; \ 143 u32 i; \
144 for (i = 0; i < priv->num_irqs; i++) \ 144 for (i = 0; i < priv->num_irqs; i++) \
145 enable_irq(priv->irqs_table[i]); \ 145 enable_irq(priv->irqs_table[i]); \
146 } while (0); 146 } while (0)
147#define cpsw_disable_irq(priv) \ 147#define cpsw_disable_irq(priv) \
148 do { \ 148 do { \
149 u32 i; \ 149 u32 i; \
150 for (i = 0; i < priv->num_irqs; i++) \ 150 for (i = 0; i < priv->num_irqs; i++) \
151 disable_irq_nosync(priv->irqs_table[i]); \ 151 disable_irq_nosync(priv->irqs_table[i]); \
152 } while (0); 152 } while (0)
153 153
154#define cpsw_slave_index(priv) \ 154#define cpsw_slave_index(priv) \
155 ((priv->data.dual_emac) ? priv->emac_port : \ 155 ((priv->data.dual_emac) ? priv->emac_port : \
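The dropped semicolons fix a genuine macro hazard: "do { ... } while (0);" expands to a statement plus an empty statement, so the macro could not be used as the body of an if/else without braces. With the caller supplying the semicolon, the usual idiom works again:

    /* Compiles only with the fixed definition; the old trailing ';'
     * would end the if body early and orphan the else.
     */
    if (ok)
            cpsw_enable_irq(priv);
    else
            cpsw_disable_irq(priv);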
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
248#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ 248#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
249#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ 249#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
250#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ 250#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
251#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */ 251#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
252#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
252#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ 253#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
253#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ 254#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
254#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ 255#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
255#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ 256#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
256#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ 257#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
257 258
258#define CTRL_TS_BITS \ 259#define CTRL_V2_TS_BITS \
259 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \ 260 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
260 TS_ANNEX_D_EN | TS_LTYPE1_EN) 261 TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
262
263#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
264#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
265#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
266
261 267
262#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN) 268#define CTRL_V3_TS_BITS \
263#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN) 269 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
264#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN) 270 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
271 TS_LTYPE1_EN)
272
273#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
274#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
275#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
265 276
266/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ 277/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
267#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ 278#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
1376 slave = &priv->slaves[priv->data.active_slave]; 1387 slave = &priv->slaves[priv->data.active_slave];
1377 1388
1378 ctrl = slave_read(slave, CPSW2_CONTROL); 1389 ctrl = slave_read(slave, CPSW2_CONTROL);
1379 ctrl &= ~CTRL_ALL_TS_MASK; 1390 switch (priv->version) {
1391 case CPSW_VERSION_2:
1392 ctrl &= ~CTRL_V2_ALL_TS_MASK;
1380 1393
1381 if (priv->cpts->tx_enable) 1394 if (priv->cpts->tx_enable)
1382 ctrl |= CTRL_TX_TS_BITS; 1395 ctrl |= CTRL_V2_TX_TS_BITS;
1383 1396
1384 if (priv->cpts->rx_enable) 1397 if (priv->cpts->rx_enable)
1385 ctrl |= CTRL_RX_TS_BITS; 1398 ctrl |= CTRL_V2_RX_TS_BITS;
1399 break;
1400 case CPSW_VERSION_3:
1401 default:
1402 ctrl &= ~CTRL_V3_ALL_TS_MASK;
1403
1404 if (priv->cpts->tx_enable)
1405 ctrl |= CTRL_V3_TX_TS_BITS;
1406
1407 if (priv->cpts->rx_enable)
1408 ctrl |= CTRL_V3_RX_TS_BITS;
1409 break;
1410 }
1386 1411
1387 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; 1412 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1388 1413
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1398 struct hwtstamp_config cfg; 1423 struct hwtstamp_config cfg;
1399 1424
1400 if (priv->version != CPSW_VERSION_1 && 1425 if (priv->version != CPSW_VERSION_1 &&
1401 priv->version != CPSW_VERSION_2) 1426 priv->version != CPSW_VERSION_2 &&
1427 priv->version != CPSW_VERSION_3)
1402 return -EOPNOTSUPP; 1428 return -EOPNOTSUPP;
1403 1429
1404 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 1430 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1443 cpsw_hwtstamp_v1(priv); 1469 cpsw_hwtstamp_v1(priv);
1444 break; 1470 break;
1445 case CPSW_VERSION_2: 1471 case CPSW_VERSION_2:
1472 case CPSW_VERSION_3:
1446 cpsw_hwtstamp_v2(priv); 1473 cpsw_hwtstamp_v2(priv);
1447 break; 1474 break;
1448 default: 1475 default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1459 struct hwtstamp_config cfg; 1486 struct hwtstamp_config cfg;
1460 1487
1461 if (priv->version != CPSW_VERSION_1 && 1488 if (priv->version != CPSW_VERSION_1 &&
1462 priv->version != CPSW_VERSION_2) 1489 priv->version != CPSW_VERSION_2 &&
1490 priv->version != CPSW_VERSION_3)
1463 return -EOPNOTSUPP; 1491 return -EOPNOTSUPP;
1464 1492
1465 cfg.flags = 0; 1493 cfg.flags = 0;
@@ -1780,25 +1808,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1780 return -EINVAL; 1808 return -EINVAL;
1781 1809
1782 if (of_property_read_u32(node, "slaves", &prop)) { 1810 if (of_property_read_u32(node, "slaves", &prop)) {
1783 pr_err("Missing slaves property in the DT.\n"); 1811 dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
1784 return -EINVAL; 1812 return -EINVAL;
1785 } 1813 }
1786 data->slaves = prop; 1814 data->slaves = prop;
1787 1815
1788 if (of_property_read_u32(node, "active_slave", &prop)) { 1816 if (of_property_read_u32(node, "active_slave", &prop)) {
1789 pr_err("Missing active_slave property in the DT.\n"); 1817 dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
1790 return -EINVAL; 1818 return -EINVAL;
1791 } 1819 }
1792 data->active_slave = prop; 1820 data->active_slave = prop;
1793 1821
1794 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { 1822 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1795 pr_err("Missing cpts_clock_mult property in the DT.\n"); 1823 dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
1796 return -EINVAL; 1824 return -EINVAL;
1797 } 1825 }
1798 data->cpts_clock_mult = prop; 1826 data->cpts_clock_mult = prop;
1799 1827
1800 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) { 1828 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
1801 pr_err("Missing cpts_clock_shift property in the DT.\n"); 1829 dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
1802 return -EINVAL; 1830 return -EINVAL;
1803 } 1831 }
1804 data->cpts_clock_shift = prop; 1832 data->cpts_clock_shift = prop;
@@ -1810,31 +1838,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1810 return -ENOMEM; 1838 return -ENOMEM;
1811 1839
1812 if (of_property_read_u32(node, "cpdma_channels", &prop)) { 1840 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
1813 pr_err("Missing cpdma_channels property in the DT.\n"); 1841 dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
1814 return -EINVAL; 1842 return -EINVAL;
1815 } 1843 }
1816 data->channels = prop; 1844 data->channels = prop;
1817 1845
1818 if (of_property_read_u32(node, "ale_entries", &prop)) { 1846 if (of_property_read_u32(node, "ale_entries", &prop)) {
1819 pr_err("Missing ale_entries property in the DT.\n"); 1847 dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
1820 return -EINVAL; 1848 return -EINVAL;
1821 } 1849 }
1822 data->ale_entries = prop; 1850 data->ale_entries = prop;
1823 1851
1824 if (of_property_read_u32(node, "bd_ram_size", &prop)) { 1852 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
1825 pr_err("Missing bd_ram_size property in the DT.\n"); 1853 dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
1826 return -EINVAL; 1854 return -EINVAL;
1827 } 1855 }
1828 data->bd_ram_size = prop; 1856 data->bd_ram_size = prop;
1829 1857
1830 if (of_property_read_u32(node, "rx_descs", &prop)) { 1858 if (of_property_read_u32(node, "rx_descs", &prop)) {
1831 pr_err("Missing rx_descs property in the DT.\n"); 1859 dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
1832 return -EINVAL; 1860 return -EINVAL;
1833 } 1861 }
1834 data->rx_descs = prop; 1862 data->rx_descs = prop;
1835 1863
1836 if (of_property_read_u32(node, "mac_control", &prop)) { 1864 if (of_property_read_u32(node, "mac_control", &prop)) {
1837 pr_err("Missing mac_control property in the DT.\n"); 1865 dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
1838 return -EINVAL; 1866 return -EINVAL;
1839 } 1867 }
1840 data->mac_control = prop; 1868 data->mac_control = prop;
@@ -1848,7 +1876,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1848 ret = of_platform_populate(node, NULL, NULL, &pdev->dev); 1876 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
1849 /* We do not want to force this, as in some cases may not have child */ 1877 /* We do not want to force this, as in some cases may not have child */
1850 if (ret) 1878 if (ret)
1851 pr_warn("Doesn't have any child node\n"); 1879 dev_warn(&pdev->dev, "Doesn't have any child node\n");
1852 1880
1853 for_each_child_of_node(node, slave_node) { 1881 for_each_child_of_node(node, slave_node) {
1854 struct cpsw_slave_data *slave_data = data->slave_data + i; 1882 struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,7 +1893,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1865 1893
1866 parp = of_get_property(slave_node, "phy_id", &lenp); 1894 parp = of_get_property(slave_node, "phy_id", &lenp);
1867 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 1895 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1868 pr_err("Missing slave[%d] phy_id property\n", i); 1896 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
1869 return -EINVAL; 1897 return -EINVAL;
1870 } 1898 }
1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1899 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
@@ -1885,18 +1913,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1885 1913
1886 slave_data->phy_if = of_get_phy_mode(slave_node); 1914 slave_data->phy_if = of_get_phy_mode(slave_node);
1887 if (slave_data->phy_if < 0) { 1915 if (slave_data->phy_if < 0) {
1888 pr_err("Missing or malformed slave[%d] phy-mode property\n", 1916 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
1889 i); 1917 i);
1890 return slave_data->phy_if; 1918 return slave_data->phy_if;
1891 } 1919 }
1892 1920
1893 if (data->dual_emac) { 1921 if (data->dual_emac) {
1894 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 1922 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
1895 &prop)) { 1923 &prop)) {
1896 pr_err("Missing dual_emac_res_vlan in DT.\n"); 1924 dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
1897 slave_data->dual_emac_res_vlan = i+1; 1925 slave_data->dual_emac_res_vlan = i+1;
1898 pr_err("Using %d as Reserved VLAN for %d slave\n", 1926 dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
1899 slave_data->dual_emac_res_vlan, i); 1927 slave_data->dual_emac_res_vlan, i);
1900 } else { 1928 } else {
1901 slave_data->dual_emac_res_vlan = prop; 1929 slave_data->dual_emac_res_vlan = prop;
1902 } 1930 }
@@ -1920,7 +1948,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1920 1948
1921 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 1949 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
1922 if (!ndev) { 1950 if (!ndev) {
1923 pr_err("cpsw: error allocating net_device\n"); 1951 dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
1924 return -ENOMEM; 1952 return -ENOMEM;
1925 } 1953 }
1926 1954
@@ -1936,10 +1964,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1936 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { 1964 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
1937 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, 1965 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
1938 ETH_ALEN); 1966 ETH_ALEN);
1939 pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); 1967 dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
1940 } else { 1968 } else {
1941 random_ether_addr(priv_sl2->mac_addr); 1969 random_ether_addr(priv_sl2->mac_addr);
1942 pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); 1970 dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
1943 } 1971 }
1944 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); 1972 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
1945 1973
@@ -1970,14 +1998,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1970 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1998 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1971 1999
1972 ndev->netdev_ops = &cpsw_netdev_ops; 2000 ndev->netdev_ops = &cpsw_netdev_ops;
1973 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 2001 ndev->ethtool_ops = &cpsw_ethtool_ops;
1974 netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); 2002 netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
1975 2003
1976 /* register the network device */ 2004 /* register the network device */
1977 SET_NETDEV_DEV(ndev, &pdev->dev); 2005 SET_NETDEV_DEV(ndev, &pdev->dev);
1978 ret = register_netdev(ndev); 2006 ret = register_netdev(ndev);
1979 if (ret) { 2007 if (ret) {
1980 pr_err("cpsw: error registering net device\n"); 2008 dev_err(&pdev->dev, "cpsw: error registering net device\n");
1981 free_netdev(ndev); 2009 free_netdev(ndev);
1982 ret = -ENODEV; 2010 ret = -ENODEV;
1983 } 2011 }
@@ -1999,7 +2027,7 @@ static int cpsw_probe(struct platform_device *pdev)
1999 2027
2000 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 2028 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
2001 if (!ndev) { 2029 if (!ndev) {
2002 pr_err("error allocating net_device\n"); 2030 dev_err(&pdev->dev, "error allocating net_device\n");
2003 return -ENOMEM; 2031 return -ENOMEM;
2004 } 2032 }
2005 2033
@@ -2014,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
2014 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); 2042 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
2015 priv->irq_enabled = true; 2043 priv->irq_enabled = true;
2016 if (!priv->cpts) { 2044 if (!priv->cpts) {
2017 pr_err("error allocating cpts\n"); 2045 dev_err(&pdev->dev, "error allocating cpts\n");
2018 goto clean_ndev_ret; 2046 goto clean_ndev_ret;
2019 } 2047 }
2020 2048
@@ -2027,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
2027 pinctrl_pm_select_default_state(&pdev->dev); 2055 pinctrl_pm_select_default_state(&pdev->dev);
2028 2056
2029 if (cpsw_probe_dt(&priv->data, pdev)) { 2057 if (cpsw_probe_dt(&priv->data, pdev)) {
2030 pr_err("cpsw: platform data missing\n"); 2058 dev_err(&pdev->dev, "cpsw: platform data missing\n");
2031 ret = -ENODEV; 2059 ret = -ENODEV;
2032 goto clean_runtime_disable_ret; 2060 goto clean_runtime_disable_ret;
2033 } 2061 }
@@ -2035,10 +2063,10 @@ static int cpsw_probe(struct platform_device *pdev)
2035 2063
2036 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 2064 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
2037 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); 2065 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2038 pr_info("Detected MACID = %pM\n", priv->mac_addr); 2066 dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
2039 } else { 2067 } else {
2040 eth_random_addr(priv->mac_addr); 2068 eth_random_addr(priv->mac_addr);
2041 pr_info("Random MACID = %pM\n", priv->mac_addr); 2069 dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
2042 } 2070 }
2043 2071
2044 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 2072 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2199,7 +2227,7 @@ static int cpsw_probe(struct platform_device *pdev)
2199 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2227 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2200 2228
2201 ndev->netdev_ops = &cpsw_netdev_ops; 2229 ndev->netdev_ops = &cpsw_netdev_ops;
2202 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 2230 ndev->ethtool_ops = &cpsw_ethtool_ops;
2203 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT); 2231 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
2204 2232
2205 /* register the network device */ 2233 /* register the network device */
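The long pr_err()/pr_info() → dev_err()/dev_info() sweep above is mechanical but worthwhile: the dev_* variants prefix each line with the driver and device instance, so logs from boards with multiple instances can be told apart. Roughly:

    pr_err("Missing slaves property in the DT.\n");
    /* -> "Missing slaves property in the DT." */

    dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
    /* -> something like "cpsw 4a100000.ethernet: Missing slaves
     *    property in the DT." (exact prefix depends on bus and
     *    device name)
     */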
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 243513980b51..6b56f85951e5 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
236 schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD); 236 schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
237} 237}
238 238
239#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk" 239static void cpts_clk_init(struct device *dev, struct cpts *cpts)
240
241static void cpts_clk_init(struct cpts *cpts)
242{ 240{
243 cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME); 241 cpts->refclk = devm_clk_get(dev, "cpts");
244 if (IS_ERR(cpts->refclk)) { 242 if (IS_ERR(cpts->refclk)) {
245 pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME); 243 dev_err(dev, "Failed to get cpts refclk\n");
246 cpts->refclk = NULL; 244 cpts->refclk = NULL;
247 return; 245 return;
248 } 246 }
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
252static void cpts_clk_release(struct cpts *cpts) 250static void cpts_clk_release(struct cpts *cpts)
253{ 251{
254 clk_disable(cpts->refclk); 252 clk_disable(cpts->refclk);
255 clk_put(cpts->refclk);
256} 253}
257 254
258static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 255static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
390 for (i = 0; i < CPTS_MAX_EVENTS; i++) 387 for (i = 0; i < CPTS_MAX_EVENTS; i++)
391 list_add(&cpts->pool_data[i].list, &cpts->pool); 388 list_add(&cpts->pool_data[i].list, &cpts->pool);
392 389
393 cpts_clk_init(cpts); 390 cpts_clk_init(dev, cpts);
394 cpts_write32(cpts, CPTS_EN, control); 391 cpts_write32(cpts, CPTS_EN, control);
395 cpts_write32(cpts, TS_PEND_EN, int_enable); 392 cpts_write32(cpts, TS_PEND_EN, int_enable);
396 393
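Moving cpts to devm_clk_get() ties the clock reference to the device, which is why clk_put() disappears from cpts_clk_release(); the clock is now also looked up by the per-device consumer id "cpts" instead of a global connection name. The managed pattern in isolation:

    #include <linux/clk.h>

    cpts->refclk = devm_clk_get(dev, "cpts");
    if (IS_ERR(cpts->refclk)) {
            dev_err(dev, "Failed to get cpts refclk\n");
            cpts->refclk = NULL;    /* driver carries on without it */
            return;
    }
    /* the get() is undone automatically at unbind; disable stays manual */
    clk_prepare_enable(cpts->refclk);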
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 88ef27067bf2..4a000f6dd6fc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
158 int bitmap_size; 158 int bitmap_size;
159 struct cpdma_desc_pool *pool; 159 struct cpdma_desc_pool *pool;
160 160
161 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 161 pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
162 if (!pool) 162 if (!pool)
163 return NULL; 163 goto fail;
164 164
165 spin_lock_init(&pool->lock); 165 spin_lock_init(&pool->lock);
166 166
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
170 pool->num_desc = size / pool->desc_size; 170 pool->num_desc = size / pool->desc_size;
171 171
172 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); 172 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
173 pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 173 pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
174 if (!pool->bitmap) 174 if (!pool->bitmap)
175 goto fail; 175 goto fail;
176 176
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
187 187
188 if (pool->iomap) 188 if (pool->iomap)
189 return pool; 189 return pool;
190
191fail: 190fail:
192 kfree(pool->bitmap);
193 kfree(pool);
194 return NULL; 191 return NULL;
195} 192}
196 193
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
203 200
204 spin_lock_irqsave(&pool->lock, flags); 201 spin_lock_irqsave(&pool->lock, flags);
205 WARN_ON(pool->used_desc); 202 WARN_ON(pool->used_desc);
206 kfree(pool->bitmap);
207 if (pool->cpumap) { 203 if (pool->cpumap) {
208 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, 204 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
209 pool->phys); 205 pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
211 iounmap(pool->iomap); 207 iounmap(pool->iomap);
212 } 208 }
213 spin_unlock_irqrestore(&pool->lock, flags); 209 spin_unlock_irqrestore(&pool->lock, flags);
214 kfree(pool);
215} 210}
216 211
217static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, 212static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
276{ 271{
277 struct cpdma_ctlr *ctlr; 272 struct cpdma_ctlr *ctlr;
278 273
279 ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL); 274 ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
280 if (!ctlr) 275 if (!ctlr)
281 return NULL; 276 return NULL;
282 277
@@ -290,10 +285,8 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
290 ctlr->params.desc_hw_addr, 285 ctlr->params.desc_hw_addr,
291 ctlr->params.desc_mem_size, 286 ctlr->params.desc_mem_size,
292 ctlr->params.desc_align); 287 ctlr->params.desc_align);
293 if (!ctlr->pool) { 288 if (!ctlr->pool)
294 kfree(ctlr);
295 return NULL; 289 return NULL;
296 }
297 290
298 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) 291 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
299 ctlr->num_chan = CPDMA_MAX_CHANNELS; 292 ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -468,7 +461,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
468 461
469 cpdma_desc_pool_destroy(ctlr->pool); 462 cpdma_desc_pool_destroy(ctlr->pool);
470 spin_unlock_irqrestore(&ctlr->lock, flags); 463 spin_unlock_irqrestore(&ctlr->lock, flags);
471 kfree(ctlr);
472 return ret; 464 return ret;
473} 465}
474EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); 466EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +499,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
507 cpdma_handler_fn handler) 499 cpdma_handler_fn handler)
508{ 500{
509 struct cpdma_chan *chan; 501 struct cpdma_chan *chan;
510 int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; 502 int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
511 unsigned long flags; 503 unsigned long flags;
512 504
513 if (__chan_linear(chan_num) >= ctlr->num_chan) 505 if (__chan_linear(chan_num) >= ctlr->num_chan)
514 return NULL; 506 return NULL;
515 507
516 ret = -ENOMEM; 508 chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
517 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
518 if (!chan) 509 if (!chan)
519 goto err_chan_alloc; 510 return ERR_PTR(-ENOMEM);
520 511
521 spin_lock_irqsave(&ctlr->lock, flags); 512 spin_lock_irqsave(&ctlr->lock, flags);
522 ret = -EBUSY; 513 if (ctlr->channels[chan_num]) {
523 if (ctlr->channels[chan_num]) 514 spin_unlock_irqrestore(&ctlr->lock, flags);
524 goto err_chan_busy; 515 devm_kfree(ctlr->dev, chan);
516 return ERR_PTR(-EBUSY);
517 }
525 518
526 chan->ctlr = ctlr; 519 chan->ctlr = ctlr;
527 chan->state = CPDMA_STATE_IDLE; 520 chan->state = CPDMA_STATE_IDLE;
@@ -551,12 +544,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
551 ctlr->channels[chan_num] = chan; 544 ctlr->channels[chan_num] = chan;
552 spin_unlock_irqrestore(&ctlr->lock, flags); 545 spin_unlock_irqrestore(&ctlr->lock, flags);
553 return chan; 546 return chan;
554
555err_chan_busy:
556 spin_unlock_irqrestore(&ctlr->lock, flags);
557 kfree(chan);
558err_chan_alloc:
559 return ERR_PTR(ret);
560} 547}
561EXPORT_SYMBOL_GPL(cpdma_chan_create); 548EXPORT_SYMBOL_GPL(cpdma_chan_create);
562 549
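The cpdma conversion is a good illustration of what devm_kzalloc() buys: every kfree() in the error and teardown paths simply disappears. The one subtlety is in cpdma_chan_create(): channels can be created and destroyed many times over the device's life, so the -EBUSY path hands the allocation back with devm_kfree() rather than letting dead allocations pile up until unbind. In outline (locking omitted for brevity):

    chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
    if (!chan)
            return ERR_PTR(-ENOMEM);        /* nothing to unwind */

    if (ctlr->channels[chan_num]) {
            devm_kfree(ctlr->dev, chan);    /* early release, slot taken */
            return ERR_PTR(-EBUSY);
    }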
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 8f0e69ce07ca..35a139e9a833 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
1567 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, 1567 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
1568 res_num))) { 1568 res_num))) {
1569 for (irq_num = res->start; irq_num <= res->end; irq_num++) { 1569 for (irq_num = res->start; irq_num <= res->end; irq_num++) {
1570 dev_err(emac_dev, "Request IRQ %d\n", irq_num);
1571 if (request_irq(irq_num, emac_irq, 0, ndev->name, 1570 if (request_irq(irq_num, emac_irq, 0, ndev->name,
1572 ndev)) { 1571 ndev)) {
1573 dev_err(emac_dev, 1572 dev_err(emac_dev,
@@ -1865,7 +1864,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
1865 struct emac_priv *priv; 1864 struct emac_priv *priv;
1866 unsigned long hw_ram_addr; 1865 unsigned long hw_ram_addr;
1867 struct emac_platform_data *pdata; 1866 struct emac_platform_data *pdata;
1868 struct device *emac_dev;
1869 struct cpdma_params dma_params; 1867 struct cpdma_params dma_params;
1870 struct clk *emac_clk; 1868 struct clk *emac_clk;
1871 unsigned long emac_bus_frequency; 1869 unsigned long emac_bus_frequency;
@@ -1911,7 +1909,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
1911 priv->coal_intvl = 0; 1909 priv->coal_intvl = 0;
1912 priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000); 1910 priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
1913 1911
1914 emac_dev = &ndev->dev;
1915 /* Get EMAC platform data */ 1912 /* Get EMAC platform data */
1916 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1913 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1917 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; 1914 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1927,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1930 hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; 1927 hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
1931 1928
1932 memset(&dma_params, 0, sizeof(dma_params)); 1929 memset(&dma_params, 0, sizeof(dma_params));
1933 dma_params.dev = emac_dev; 1930 dma_params.dev = &pdev->dev;
1934 dma_params.dmaregs = priv->emac_base; 1931 dma_params.dmaregs = priv->emac_base;
1935 dma_params.rxthresh = priv->emac_base + 0x120; 1932 dma_params.rxthresh = priv->emac_base + 0x120;
1936 dma_params.rxfree = priv->emac_base + 0x140; 1933 dma_params.rxfree = priv->emac_base + 0x140;
@@ -1980,7 +1977,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1980 } 1977 }
1981 1978
1982 ndev->netdev_ops = &emac_netdev_ops; 1979 ndev->netdev_ops = &emac_netdev_ops;
1983 SET_ETHTOOL_OPS(ndev, &ethtool_ops); 1980 ndev->ethtool_ops = &ethtool_ops;
1984 netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); 1981 netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
1985 1982
1986 /* register the network device */ 1983 /* register the network device */
@@ -1994,7 +1991,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1994 1991
1995 1992
1996 if (netif_msg_probe(priv)) { 1993 if (netif_msg_probe(priv)) {
1997 dev_notice(emac_dev, "DaVinci EMAC Probe found device "\ 1994 dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
1998 "(regs: %p, irq: %d)\n", 1995 "(regs: %p, irq: %d)\n",
1999 (void *)priv->emac_base_phys, ndev->irq); 1996 (void *)priv->emac_base_phys, ndev->irq);
2000 } 1997 }
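The davinci_emac change looks cosmetic but matters for the cpdma conversion above: dma_params.dev must be the physical platform device, because the generic DMA API and devres both key off that struct device; the net_device's embedded class device has no DMA capabilities of its own.

    /* Use the device that actually owns the DMA capabilities: */
    dma_params.dev = &pdev->dev;    /* not &ndev->dev */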
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 0cca9dec5d82..735dc53d4b01 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
303 return -EINVAL; 303 return -EINVAL;
304 304
305 if (of_property_read_u32(node, "bus_freq", &prop)) { 305 if (of_property_read_u32(node, "bus_freq", &prop)) {
306 pr_err("Missing bus_freq property in the DT.\n"); 306 dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
307 return -EINVAL; 307 return -EINVAL;
308 } 308 }
309 data->bus_freq = prop; 309 data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
321 struct phy_device *phy; 321 struct phy_device *phy;
322 int ret, addr; 322 int ret, addr;
323 323
324 data = kzalloc(sizeof(*data), GFP_KERNEL); 324 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
325 if (!data) 325 if (!data)
326 return -ENOMEM; 326 return -ENOMEM;
327 327
328 data->bus = mdiobus_alloc(); 328 data->bus = devm_mdiobus_alloc(dev);
329 if (!data->bus) { 329 if (!data->bus) {
330 dev_err(dev, "failed to alloc mii bus\n"); 330 dev_err(dev, "failed to alloc mii bus\n");
331 ret = -ENOMEM; 331 return -ENOMEM;
332 goto bail_out;
333 } 332 }
334 333
335 if (dev->of_node) { 334 if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
349 data->bus->parent = dev; 348 data->bus->parent = dev;
350 data->bus->priv = data; 349 data->bus->priv = data;
351 350
352 /* Select default pin state */
353 pinctrl_pm_select_default_state(&pdev->dev);
354
355 pm_runtime_enable(&pdev->dev); 351 pm_runtime_enable(&pdev->dev);
356 pm_runtime_get_sync(&pdev->dev); 352 pm_runtime_get_sync(&pdev->dev);
357 data->clk = clk_get(&pdev->dev, "fck"); 353 data->clk = devm_clk_get(dev, "fck");
358 if (IS_ERR(data->clk)) { 354 if (IS_ERR(data->clk)) {
359 dev_err(dev, "failed to get device clock\n"); 355 dev_err(dev, "failed to get device clock\n");
360 ret = PTR_ERR(data->clk); 356 ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
367 spin_lock_init(&data->lock); 363 spin_lock_init(&data->lock);
368 364
369 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 365 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
370 if (!res) { 366 data->regs = devm_ioremap_resource(dev, res);
371 dev_err(dev, "could not find register map resource\n"); 367 if (IS_ERR(data->regs)) {
372 ret = -ENOENT; 368 ret = PTR_ERR(data->regs);
373 goto bail_out;
374 }
375
376 res = devm_request_mem_region(dev, res->start, resource_size(res),
377 dev_name(dev));
378 if (!res) {
379 dev_err(dev, "could not allocate register map resource\n");
380 ret = -ENXIO;
381 goto bail_out;
382 }
383
384 data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
385 if (!data->regs) {
386 dev_err(dev, "could not map mdio registers\n");
387 ret = -ENOMEM;
388 goto bail_out; 369 goto bail_out;
389 } 370 }
390 371
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
406 return 0; 387 return 0;
407 388
408bail_out: 389bail_out:
409 if (data->bus)
410 mdiobus_free(data->bus);
411
412 if (data->clk)
413 clk_put(data->clk);
414 pm_runtime_put_sync(&pdev->dev); 390 pm_runtime_put_sync(&pdev->dev);
415 pm_runtime_disable(&pdev->dev); 391 pm_runtime_disable(&pdev->dev);
416 392
417 kfree(data);
418
419 return ret; 393 return ret;
420} 394}
421 395
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
423{ 397{
424 struct davinci_mdio_data *data = platform_get_drvdata(pdev); 398 struct davinci_mdio_data *data = platform_get_drvdata(pdev);
425 399
426 if (data->bus) { 400 if (data->bus)
427 mdiobus_unregister(data->bus); 401 mdiobus_unregister(data->bus);
428 mdiobus_free(data->bus);
429 }
430 402
431 if (data->clk)
432 clk_put(data->clk);
433 pm_runtime_put_sync(&pdev->dev); 403 pm_runtime_put_sync(&pdev->dev);
434 pm_runtime_disable(&pdev->dev); 404 pm_runtime_disable(&pdev->dev);
435 405
436 kfree(data);
437
438 return 0; 406 return 0;
439} 407}
440 408
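devm_ioremap_resource() collapses the old three-step dance (check the resource, devm_request_mem_region(), devm_ioremap_nocache()) into one call that also prints its own error messages, which is where davinci_mdio's probe sheds most of its lines. The canonical pattern:

    #include <linux/platform_device.h>
    #include <linux/io.h>

    struct resource *res;
    void __iomem *regs;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    regs = devm_ioremap_resource(&pdev->dev, res);  /* validates, claims, maps */
    if (IS_ERR(regs))
            return PTR_ERR(regs);   /* helper already logged the failure */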
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 449011b0e007..14389f841d43 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2192,7 +2192,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
2192{ 2192{
2193 int ret; 2193 int ret;
2194 int i; 2194 int i;
2195 int nz_addr = 0;
2196 struct net_device *dev; 2195 struct net_device *dev;
2197 struct tile_net_priv *priv; 2196 struct tile_net_priv *priv;
2198 2197
@@ -2212,7 +2211,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
2212 2211
2213 /* Initialize "priv". */ 2212 /* Initialize "priv". */
2214 priv = netdev_priv(dev); 2213 priv = netdev_priv(dev);
2215 memset(priv, 0, sizeof(*priv));
2216 priv->dev = dev; 2214 priv->dev = dev;
2217 priv->channel = -1; 2215 priv->channel = -1;
2218 priv->loopify_channel = -1; 2216 priv->loopify_channel = -1;
@@ -2223,15 +2221,10 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
2223 * be done before the device is opened. If the MAC is all zeroes, 2221 * be done before the device is opened. If the MAC is all zeroes,
2224 * we use a random address, since we're probably on the simulator. 2222 * we use a random address, since we're probably on the simulator.
2225 */ 2223 */
2226 for (i = 0; i < 6; i++) 2224 if (!is_zero_ether_addr(mac))
2227 nz_addr |= mac[i]; 2225 ether_addr_copy(dev->dev_addr, mac);
2228 2226 else
2229 if (nz_addr) {
2230 memcpy(dev->dev_addr, mac, ETH_ALEN);
2231 dev->addr_len = 6;
2232 } else {
2233 eth_hw_addr_random(dev); 2227 eth_hw_addr_random(dev);
2234 }
2235 2228
2236 /* Register the network device. */ 2229 /* Register the network device. */
2237 ret = register_netdev(dev); 2230 ret = register_netdev(dev);
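The tilegx cleanup swaps an open-coded "all zero MAC?" loop for the <linux/etherdevice.h> helpers, and dropping the explicit dev->addr_len = 6 should be safe assuming the device setup path runs ether_setup(), which initializes addr_len to ETH_ALEN, as Ethernet drivers normally do. Equivalent sketch:

    #include <linux/etherdevice.h>

    if (!is_zero_ether_addr(mac))
            ether_addr_copy(dev->dev_addr, mac);    /* fixed ETH_ALEN-byte copy */
    else
            eth_hw_addr_random(dev);        /* random locally-administered MAC */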
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d899d0072ae0..bb7992804664 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1561,7 +1561,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
1561 * alloc netdev 1561 * alloc netdev
1562 */ 1562 */
1563 *netdev = alloc_etherdev(sizeof(struct gelic_port)); 1563 *netdev = alloc_etherdev(sizeof(struct gelic_port));
1564 if (!netdev) { 1564 if (!*netdev) {
1565 kfree(card->unalign); 1565 kfree(card->unalign);
1566 return NULL; 1566 return NULL;
1567 } 1567 }
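The gelic change is a one-character fix for a classic double-pointer slip: the function takes struct net_device **netdev, so the old "if (!netdev)" tested the caller's (always valid) out-parameter rather than the allocation, leaving the failure path unreachable and a failed alloc_etherdev() to be dereferenced later. The corrected shape (helper name illustrative):

    static int example_alloc(struct net_device **netdev)
    {
            *netdev = alloc_etherdev(sizeof(struct gelic_port));
            if (!*netdev)   /* test what was allocated, not the out-param */
                    return -ENOMEM;
            return 0;
    }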
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 8a049a2b4474..f66ddaee0c87 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
19 19
20config VIA_RHINE 20config VIA_RHINE
21 tristate "VIA Rhine support" 21 tristate "VIA Rhine support"
22 depends on PCI 22 depends on (PCI || USE_OF)
23 select CRC32 23 select CRC32
24 select MII 24 select MII
25 ---help--- 25 ---help---
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f61dc2b72bb2..2d72f96a9e2c 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
94#include <linux/ioport.h> 94#include <linux/ioport.h>
95#include <linux/interrupt.h> 95#include <linux/interrupt.h>
96#include <linux/pci.h> 96#include <linux/pci.h>
97#include <linux/of_address.h>
98#include <linux/of_device.h>
99#include <linux/of_irq.h>
100#include <linux/platform_device.h>
97#include <linux/dma-mapping.h> 101#include <linux/dma-mapping.h>
98#include <linux/netdevice.h> 102#include <linux/netdevice.h>
99#include <linux/etherdevice.h> 103#include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
116static const char version[] = 120static const char version[] =
117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker"; 121 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
118 122
119/* This driver was written to use PCI memory space. Some early versions
120 of the Rhine may only work correctly with I/O space accesses. */
121#ifdef CONFIG_VIA_RHINE_MMIO
122#define USE_MMIO
123#else
124#endif
125
126MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 123MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver"); 124MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128MODULE_LICENSE("GPL"); 125MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
260 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */ 257 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
261 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */ 258 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
262 rqRhineI = 0x0100, /* See comment below */ 259 rqRhineI = 0x0100, /* See comment below */
260 rqIntPHY = 0x0200, /* Integrated PHY */
261 rqMgmt = 0x0400, /* Management adapter */
262 rqNeedEnMMIO = 0x0800, /* Whether the core needs to be
263 * switched from PIO mode to MMIO
264 * (only applies to PCI)
265 */
263}; 266};
264/* 267/*
265 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable 268 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
279}; 282};
280MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); 283MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
281 284
285/* OpenFirmware identifiers for platform-bus devices
286 * The .data field is currently only used to store quirks
287 */
288static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
289static struct of_device_id rhine_of_tbl[] = {
290 { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
291 { } /* terminate list */
292};
293MODULE_DEVICE_TABLE(of, rhine_of_tbl);
282 294
283/* Offsets to the device registers. */ 295/* Offsets to the device registers. */
284enum register_offsets { 296enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
338 BCR1_MED1=0x80, /* for VT6102 */ 350 BCR1_MED1=0x80, /* for VT6102 */
339}; 351};
340 352
341#ifdef USE_MMIO
342/* Registers we check that mmio and reg are the same. */ 353/* Registers we check that mmio and reg are the same. */
343static const int mmio_verify_registers[] = { 354static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD, 355 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345 0 356 0
346}; 357};
347#endif
348 358
349/* Bits in the interrupt status/mask registers. */ 359/* Bits in the interrupt status/mask registers. */
350enum intr_status_bits { 360enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
446 unsigned char *tx_bufs; 456 unsigned char *tx_bufs;
447 dma_addr_t tx_bufs_dma; 457 dma_addr_t tx_bufs_dma;
448 458
449 struct pci_dev *pdev; 459 int irq;
450 long pioaddr; 460 long pioaddr;
451 struct net_device *dev; 461 struct net_device *dev;
452 struct napi_struct napi; 462 struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
649 "failed" : "succeeded"); 659 "failed" : "succeeded");
650} 660}
651 661
652#ifdef USE_MMIO
653static void enable_mmio(long pioaddr, u32 quirks) 662static void enable_mmio(long pioaddr, u32 quirks)
654{ 663{
655 int n; 664 int n;
656 if (quirks & rqRhineI) { 665
657 /* More recent docs say that this bit is reserved ... */ 666 if (quirks & rqNeedEnMMIO) {
658 n = inb(pioaddr + ConfigA) | 0x20; 667 if (quirks & rqRhineI) {
659 outb(n, pioaddr + ConfigA); 668 /* More recent docs say that this bit is reserved */
660 } else { 669 n = inb(pioaddr + ConfigA) | 0x20;
661 n = inb(pioaddr + ConfigD) | 0x80; 670 outb(n, pioaddr + ConfigA);
662 outb(n, pioaddr + ConfigD); 671 } else {
672 n = inb(pioaddr + ConfigD) | 0x80;
673 outb(n, pioaddr + ConfigD);
674 }
663 } 675 }
664} 676}
665#endif 677
678static inline int verify_mmio(struct device *hwdev,
679 long pioaddr,
680 void __iomem *ioaddr,
681 u32 quirks)
682{
683 if (quirks & rqNeedEnMMIO) {
684 int i = 0;
685
686 /* Check that selected MMIO registers match the PIO ones */
687 while (mmio_verify_registers[i]) {
688 int reg = mmio_verify_registers[i++];
689 unsigned char a = inb(pioaddr+reg);
690 unsigned char b = readb(ioaddr+reg);
691
692 if (a != b) {
693 dev_err(hwdev,
694 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
695 reg, a, b);
696 return -EIO;
697 }
698 }
699 }
700 return 0;
701}
666 702
667/* 703/*
668 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM 704 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
682 if (i > 512) 718 if (i > 512)
683 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__); 719 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
684 720
685#ifdef USE_MMIO
686 /* 721 /*
687 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable 722 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
688 * MMIO. If reloading EEPROM was done first this could be avoided, but 723 * MMIO. If reloading EEPROM was done first this could be avoided, but
689 * it is not known if that still works with the "win98-reboot" problem. 724 * it is not known if that still works with the "win98-reboot" problem.
690 */ 725 */
691 enable_mmio(pioaddr, rp->quirks); 726 enable_mmio(pioaddr, rp->quirks);
692#endif
693 727
694 /* Turn off EEPROM-controlled wake-up (magic packet) */ 728 /* Turn off EEPROM-controlled wake-up (magic packet) */
695 if (rp->quirks & rqWOL) 729 if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
701static void rhine_poll(struct net_device *dev) 735static void rhine_poll(struct net_device *dev)
702{ 736{
703 struct rhine_private *rp = netdev_priv(dev); 737 struct rhine_private *rp = netdev_priv(dev);
704 const int irq = rp->pdev->irq; 738 const int irq = rp->irq;
705 739
706 disable_irq(irq); 740 disable_irq(irq);
707 rhine_interrupt(irq, dev); 741 rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
846 msleep(5); 880 msleep(5);
847 881
848 /* Reload EEPROM controlled bytes cleared by soft reset */ 882 /* Reload EEPROM controlled bytes cleared by soft reset */
849 rhine_reload_eeprom(pioaddr, dev); 883 if (dev_is_pci(dev->dev.parent))
884 rhine_reload_eeprom(pioaddr, dev);
850} 885}
851 886
852static const struct net_device_ops rhine_netdev_ops = { 887static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
867#endif 902#endif
868}; 903};
869 904
870static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 905static int rhine_init_one_common(struct device *hwdev, u32 quirks,
906 long pioaddr, void __iomem *ioaddr, int irq)
871{ 907{
872 struct net_device *dev; 908 struct net_device *dev;
873 struct rhine_private *rp; 909 struct rhine_private *rp;
874 int i, rc; 910 int i, rc, phy_id;
875 u32 quirks;
876 long pioaddr;
877 long memaddr;
878 void __iomem *ioaddr;
879 int io_size, phy_id;
880 const char *name; 911 const char *name;
881#ifdef USE_MMIO
882 int bar = 1;
883#else
884 int bar = 0;
885#endif
886
887/* when built into the kernel, we only print version if device is found */
888#ifndef MODULE
889 pr_info_once("%s\n", version);
890#endif
891
892 io_size = 256;
893 phy_id = 0;
894 quirks = 0;
895 name = "Rhine";
896 if (pdev->revision < VTunknown0) {
897 quirks = rqRhineI;
898 io_size = 128;
899 }
900 else if (pdev->revision >= VT6102) {
901 quirks = rqWOL | rqForceReset;
902 if (pdev->revision < VT6105) {
903 name = "Rhine II";
904 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
905 }
906 else {
907 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
908 if (pdev->revision >= VT6105_B0)
909 quirks |= rq6patterns;
910 if (pdev->revision < VT6105M)
911 name = "Rhine III";
912 else
913 name = "Rhine III (Management Adapter)";
914 }
915 }
916
917 rc = pci_enable_device(pdev);
918 if (rc)
919 goto err_out;
920 912
921 /* this should always be supported */ 913 /* this should always be supported */
922 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 914 rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
923 if (rc) { 915 if (rc) {
924 dev_err(&pdev->dev, 916 dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
925 "32-bit PCI DMA addresses not supported by the card!?\n"); 917 goto err_out;
926 goto err_out_pci_disable;
927 }
928
929 /* sanity check */
930 if ((pci_resource_len(pdev, 0) < io_size) ||
931 (pci_resource_len(pdev, 1) < io_size)) {
932 rc = -EIO;
933 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
934 goto err_out_pci_disable;
935 } 918 }
936 919
937 pioaddr = pci_resource_start(pdev, 0);
938 memaddr = pci_resource_start(pdev, 1);
939
940 pci_set_master(pdev);
941
942 dev = alloc_etherdev(sizeof(struct rhine_private)); 920 dev = alloc_etherdev(sizeof(struct rhine_private));
943 if (!dev) { 921 if (!dev) {
944 rc = -ENOMEM; 922 rc = -ENOMEM;
945 goto err_out_pci_disable; 923 goto err_out;
946 } 924 }
947 SET_NETDEV_DEV(dev, &pdev->dev); 925 SET_NETDEV_DEV(dev, hwdev);
948 926
949 rp = netdev_priv(dev); 927 rp = netdev_priv(dev);
950 rp->dev = dev; 928 rp->dev = dev;
951 rp->quirks = quirks; 929 rp->quirks = quirks;
952 rp->pioaddr = pioaddr; 930 rp->pioaddr = pioaddr;
953 rp->pdev = pdev; 931 rp->base = ioaddr;
932 rp->irq = irq;
954 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); 933 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
955 934
956 rc = pci_request_regions(pdev, DRV_NAME); 935 phy_id = rp->quirks & rqIntPHY ? 1 : 0;
957 if (rc)
958 goto err_out_free_netdev;
959
960 ioaddr = pci_iomap(pdev, bar, io_size);
961 if (!ioaddr) {
962 rc = -EIO;
963 dev_err(&pdev->dev,
964 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
965 pci_name(pdev), io_size, memaddr);
966 goto err_out_free_res;
967 }
968
969#ifdef USE_MMIO
970 enable_mmio(pioaddr, quirks);
971
972 /* Check that selected MMIO registers match the PIO ones */
973 i = 0;
974 while (mmio_verify_registers[i]) {
975 int reg = mmio_verify_registers[i++];
976 unsigned char a = inb(pioaddr+reg);
977 unsigned char b = readb(ioaddr+reg);
978 if (a != b) {
979 rc = -EIO;
980 dev_err(&pdev->dev,
981 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
982 reg, a, b);
983 goto err_out_unmap;
984 }
985 }
986#endif /* USE_MMIO */
987
988 rp->base = ioaddr;
989 936
990 u64_stats_init(&rp->tx_stats.syncp); 937 u64_stats_init(&rp->tx_stats.syncp);
991 u64_stats_init(&rp->rx_stats.syncp); 938 u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1030 if (rp->quirks & rqRhineI) 977 if (rp->quirks & rqRhineI)
1031 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 978 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1032 979
1033 if (pdev->revision >= VT6105M) 980 if (rp->quirks & rqMgmt)
1034 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | 981 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1035 NETIF_F_HW_VLAN_CTAG_RX | 982 NETIF_F_HW_VLAN_CTAG_RX |
1036 NETIF_F_HW_VLAN_CTAG_FILTER; 983 NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1038 /* dev->name not defined before register_netdev()! */ 985 /* dev->name not defined before register_netdev()! */
1039 rc = register_netdev(dev); 986 rc = register_netdev(dev);
1040 if (rc) 987 if (rc)
1041 goto err_out_unmap; 988 goto err_out_free_netdev;
989
990 if (rp->quirks & rqRhineI)
991 name = "Rhine";
992 else if (rp->quirks & rqStatusWBRace)
993 name = "Rhine II";
994 else if (rp->quirks & rqMgmt)
995 name = "Rhine III (Management Adapter)";
996 else
997 name = "Rhine III";
1042 998
1043 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n", 999 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1044 name, 1000 name, (long)ioaddr, dev->dev_addr, rp->irq);
1045#ifdef USE_MMIO
1046 memaddr,
1047#else
1048 (long)ioaddr,
1049#endif
1050 dev->dev_addr, pdev->irq);
1051 1001
1052 pci_set_drvdata(pdev, dev); 1002 dev_set_drvdata(hwdev, dev);
1053 1003
1054 { 1004 {
1055 u16 mii_cmd; 1005 u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1078 1028
1079 return 0; 1029 return 0;
1080 1030
1031err_out_free_netdev:
1032 free_netdev(dev);
1033err_out:
1034 return rc;
1035}
1036
1037static int rhine_init_one_pci(struct pci_dev *pdev,
1038 const struct pci_device_id *ent)
1039{
1040 struct device *hwdev = &pdev->dev;
1041 int rc;
1042 long pioaddr, memaddr;
1043 void __iomem *ioaddr;
1044 int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1045
1046/* This driver was written to use PCI memory space. Some early versions
1047 * of the Rhine may only work correctly with I/O space accesses.
1048 * TODO: determine for which revisions this is true and assign the flag
1049 * in code as opposed to this Kconfig option (???)
1050 */
1051#ifdef CONFIG_VIA_RHINE_MMIO
1052 u32 quirks = rqNeedEnMMIO;
1053#else
1054 u32 quirks = 0;
1055#endif
1056
1057/* when built into the kernel, we only print version if device is found */
1058#ifndef MODULE
1059 pr_info_once("%s\n", version);
1060#endif
1061
1062 rc = pci_enable_device(pdev);
1063 if (rc)
1064 goto err_out;
1065
1066 if (pdev->revision < VTunknown0) {
1067 quirks |= rqRhineI;
1068 } else if (pdev->revision >= VT6102) {
1069 quirks |= rqWOL | rqForceReset;
1070 if (pdev->revision < VT6105) {
1071 quirks |= rqStatusWBRace;
1072 } else {
1073 quirks |= rqIntPHY;
1074 if (pdev->revision >= VT6105_B0)
1075 quirks |= rq6patterns;
1076 if (pdev->revision >= VT6105M)
1077 quirks |= rqMgmt;
1078 }
1079 }
1080
1081 /* sanity check */
1082 if ((pci_resource_len(pdev, 0) < io_size) ||
1083 (pci_resource_len(pdev, 1) < io_size)) {
1084 rc = -EIO;
1085 dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1086 goto err_out_pci_disable;
1087 }
1088
1089 pioaddr = pci_resource_start(pdev, 0);
1090 memaddr = pci_resource_start(pdev, 1);
1091
1092 pci_set_master(pdev);
1093
1094 rc = pci_request_regions(pdev, DRV_NAME);
1095 if (rc)
1096 goto err_out_pci_disable;
1097
1098 ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1099 if (!ioaddr) {
1100 rc = -EIO;
1101 dev_err(hwdev,
1102 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1103 dev_name(hwdev), io_size, memaddr);
1104 goto err_out_free_res;
1105 }
1106
1107 enable_mmio(pioaddr, quirks);
1108
1109 rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1110 if (rc)
1111 goto err_out_unmap;
1112
1113 rc = rhine_init_one_common(&pdev->dev, quirks,
1114 pioaddr, ioaddr, pdev->irq);
1115 if (!rc)
1116 return 0;
1117
1081err_out_unmap: 1118err_out_unmap:
1082 pci_iounmap(pdev, ioaddr); 1119 pci_iounmap(pdev, ioaddr);
1083err_out_free_res: 1120err_out_free_res:
1084 pci_release_regions(pdev); 1121 pci_release_regions(pdev);
1085err_out_free_netdev:
1086 free_netdev(dev);
1087err_out_pci_disable: 1122err_out_pci_disable:
1088 pci_disable_device(pdev); 1123 pci_disable_device(pdev);
1089err_out: 1124err_out:
1090 return rc; 1125 return rc;
1091} 1126}
1092 1127
1128static int rhine_init_one_platform(struct platform_device *pdev)
1129{
1130 const struct of_device_id *match;
1131 const u32 *quirks;
1132 int irq;
1133 struct resource *res;
1134 void __iomem *ioaddr;
1135
1136 match = of_match_device(rhine_of_tbl, &pdev->dev);
1137 if (!match)
1138 return -EINVAL;
1139
1140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1141 ioaddr = devm_ioremap_resource(&pdev->dev, res);
1142 if (IS_ERR(ioaddr))
1143 return PTR_ERR(ioaddr);
1144
1145 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1146 if (!irq)
1147 return -EINVAL;
1148
1149 quirks = match->data;
1150 if (!quirks)
1151 return -EINVAL;
1152
1153 return rhine_init_one_common(&pdev->dev, *quirks,
1154 (long)ioaddr, ioaddr, irq);
1155}
1156
1093static int alloc_ring(struct net_device* dev) 1157static int alloc_ring(struct net_device* dev)
1094{ 1158{
1095 struct rhine_private *rp = netdev_priv(dev); 1159 struct rhine_private *rp = netdev_priv(dev);
1160 struct device *hwdev = dev->dev.parent;
1096 void *ring; 1161 void *ring;
1097 dma_addr_t ring_dma; 1162 dma_addr_t ring_dma;
1098 1163
1099 ring = pci_alloc_consistent(rp->pdev, 1164 ring = dma_alloc_coherent(hwdev,
1100 RX_RING_SIZE * sizeof(struct rx_desc) + 1165 RX_RING_SIZE * sizeof(struct rx_desc) +
1101 TX_RING_SIZE * sizeof(struct tx_desc), 1166 TX_RING_SIZE * sizeof(struct tx_desc),
1102 &ring_dma); 1167 &ring_dma,
1168 GFP_ATOMIC);
1103 if (!ring) { 1169 if (!ring) {
1104 netdev_err(dev, "Could not allocate DMA memory\n"); 1170 netdev_err(dev, "Could not allocate DMA memory\n");
1105 return -ENOMEM; 1171 return -ENOMEM;
1106 } 1172 }
1107 if (rp->quirks & rqRhineI) { 1173 if (rp->quirks & rqRhineI) {
1108 rp->tx_bufs = pci_alloc_consistent(rp->pdev, 1174 rp->tx_bufs = dma_alloc_coherent(hwdev,
1109 PKT_BUF_SZ * TX_RING_SIZE, 1175 PKT_BUF_SZ * TX_RING_SIZE,
1110 &rp->tx_bufs_dma); 1176 &rp->tx_bufs_dma,
1177 GFP_ATOMIC);
1111 if (rp->tx_bufs == NULL) { 1178 if (rp->tx_bufs == NULL) {
1112 pci_free_consistent(rp->pdev, 1179 dma_free_coherent(hwdev,
1113 RX_RING_SIZE * sizeof(struct rx_desc) + 1180 RX_RING_SIZE * sizeof(struct rx_desc) +
1114 TX_RING_SIZE * sizeof(struct tx_desc), 1181 TX_RING_SIZE * sizeof(struct tx_desc),
1115 ring, ring_dma); 1182 ring, ring_dma);
1116 return -ENOMEM; 1183 return -ENOMEM;
1117 } 1184 }
1118 } 1185 }
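The ring hunks above are the heart of the bus split: the pci_*_consistent() wrappers only work with a struct pci_dev, while the generic DMA API takes any struct device, so one body of code can serve both probes. The wrappers were thin veneers; pci_alloc_consistent(pdev, sz, &h) is exactly dma_alloc_coherent(&pdev->dev, sz, &h, GFP_ATOMIC). A minimal sketch of the converted allocation:

#include <linux/dma-mapping.h>

/* One coherent block holding both descriptor rings, keyed off a generic
 * struct device (dev->dev.parent in this driver) instead of a pci_dev.
 * GFP_ATOMIC mirrors the old pci_alloc_consistent() semantics. */
static void *alloc_desc_rings(struct device *hwdev, size_t rx_sz,
			      size_t tx_sz, dma_addr_t *dma)
{
	return dma_alloc_coherent(hwdev, rx_sz + tx_sz, dma, GFP_ATOMIC);
}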
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
1128static void free_ring(struct net_device* dev) 1195static void free_ring(struct net_device* dev)
1129{ 1196{
1130 struct rhine_private *rp = netdev_priv(dev); 1197 struct rhine_private *rp = netdev_priv(dev);
1198 struct device *hwdev = dev->dev.parent;
1131 1199
1132 pci_free_consistent(rp->pdev, 1200 dma_free_coherent(hwdev,
1133 RX_RING_SIZE * sizeof(struct rx_desc) + 1201 RX_RING_SIZE * sizeof(struct rx_desc) +
1134 TX_RING_SIZE * sizeof(struct tx_desc), 1202 TX_RING_SIZE * sizeof(struct tx_desc),
1135 rp->rx_ring, rp->rx_ring_dma); 1203 rp->rx_ring, rp->rx_ring_dma);
1136 rp->tx_ring = NULL; 1204 rp->tx_ring = NULL;
1137 1205
1138 if (rp->tx_bufs) 1206 if (rp->tx_bufs)
1139 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE, 1207 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1140 rp->tx_bufs, rp->tx_bufs_dma); 1208 rp->tx_bufs, rp->tx_bufs_dma);
1141 1209
1142 rp->tx_bufs = NULL; 1210 rp->tx_bufs = NULL;
1143 1211
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
1146static void alloc_rbufs(struct net_device *dev) 1214static void alloc_rbufs(struct net_device *dev)
1147{ 1215{
1148 struct rhine_private *rp = netdev_priv(dev); 1216 struct rhine_private *rp = netdev_priv(dev);
1217 struct device *hwdev = dev->dev.parent;
1149 dma_addr_t next; 1218 dma_addr_t next;
1150 int i; 1219 int i;
1151 1220
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
1174 break; 1243 break;
1175 1244
1176 rp->rx_skbuff_dma[i] = 1245 rp->rx_skbuff_dma[i] =
1177 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, 1246 dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
1178 PCI_DMA_FROMDEVICE); 1247 DMA_FROM_DEVICE);
1179 if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) { 1248 if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
1180 rp->rx_skbuff_dma[i] = 0; 1249 rp->rx_skbuff_dma[i] = 0;
1181 dev_kfree_skb(skb); 1250 dev_kfree_skb(skb);
1182 break; 1251 break;
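The buffer hunks repeat the same streaming-DMA pattern: dma_map_single()/dma_unmap_single() plus an explicit dma_mapping_error() check, since mappings can fail on IOMMU or swiotlb systems. A sketch of the receive-side idiom, using 0 as the driver's "not mapped" sentinel:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an rx skb for device writes; a returned 0 means "not mapped",
 * matching how rx_skbuff_dma[] entries are treated above. */
static dma_addr_t map_rx_skb(struct device *hwdev, struct sk_buff *skb,
			     unsigned int buf_sz)
{
	dma_addr_t dma = dma_map_single(hwdev, skb->data, buf_sz,
					DMA_FROM_DEVICE);

	return dma_mapping_error(hwdev, dma) ? 0 : dma;
}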
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
1190static void free_rbufs(struct net_device* dev) 1259static void free_rbufs(struct net_device* dev)
1191{ 1260{
1192 struct rhine_private *rp = netdev_priv(dev); 1261 struct rhine_private *rp = netdev_priv(dev);
1262 struct device *hwdev = dev->dev.parent;
1193 int i; 1263 int i;
1194 1264
1195 /* Free all the skbuffs in the Rx queue. */ 1265 /* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
1197 rp->rx_ring[i].rx_status = 0; 1267 rp->rx_ring[i].rx_status = 0;
1198 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ 1268 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1199 if (rp->rx_skbuff[i]) { 1269 if (rp->rx_skbuff[i]) {
1200 pci_unmap_single(rp->pdev, 1270 dma_unmap_single(hwdev,
1201 rp->rx_skbuff_dma[i], 1271 rp->rx_skbuff_dma[i],
1202 rp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1272 rp->rx_buf_sz, DMA_FROM_DEVICE);
1203 dev_kfree_skb(rp->rx_skbuff[i]); 1273 dev_kfree_skb(rp->rx_skbuff[i]);
1204 } 1274 }
1205 rp->rx_skbuff[i] = NULL; 1275 rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
1230static void free_tbufs(struct net_device* dev) 1300static void free_tbufs(struct net_device* dev)
1231{ 1301{
1232 struct rhine_private *rp = netdev_priv(dev); 1302 struct rhine_private *rp = netdev_priv(dev);
1303 struct device *hwdev = dev->dev.parent;
1233 int i; 1304 int i;
1234 1305
1235 for (i = 0; i < TX_RING_SIZE; i++) { 1306 for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
1238 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ 1309 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1239 if (rp->tx_skbuff[i]) { 1310 if (rp->tx_skbuff[i]) {
1240 if (rp->tx_skbuff_dma[i]) { 1311 if (rp->tx_skbuff_dma[i]) {
1241 pci_unmap_single(rp->pdev, 1312 dma_unmap_single(hwdev,
1242 rp->tx_skbuff_dma[i], 1313 rp->tx_skbuff_dma[i],
1243 rp->tx_skbuff[i]->len, 1314 rp->tx_skbuff[i]->len,
1244 PCI_DMA_TODEVICE); 1315 DMA_TO_DEVICE);
1245 } 1316 }
1246 dev_kfree_skb(rp->tx_skbuff[i]); 1317 dev_kfree_skb(rp->tx_skbuff[i]);
1247 } 1318 }
@@ -1278,8 +1349,9 @@ static void rhine_set_carrier(struct mii_if_info *mii)
1278 /* autoneg is off: Link is always assumed to be up */ 1349 /* autoneg is off: Link is always assumed to be up */
1279 if (!netif_carrier_ok(dev)) 1350 if (!netif_carrier_ok(dev))
1280 netif_carrier_on(dev); 1351 netif_carrier_on(dev);
1281 } else /* Let MII library update carrier status */ 1352 }
1282 rhine_check_media(dev, 0); 1353
1354 rhine_check_media(dev, 0);
1283 1355
1284 netif_info(rp, link, dev, "force_media %d, carrier %d\n", 1356 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1285 mii->force_media, netif_carrier_ok(dev)); 1357 mii->force_media, netif_carrier_ok(dev));
@@ -1469,7 +1541,7 @@ static void init_registers(struct net_device *dev)
1469 1541
1470 rhine_set_rx_mode(dev); 1542 rhine_set_rx_mode(dev);
1471 1543
1472 if (rp->pdev->revision >= VT6105M) 1544 if (rp->quirks & rqMgmt)
1473 rhine_init_cam_filter(dev); 1545 rhine_init_cam_filter(dev);
1474 1546
1475 napi_enable(&rp->napi); 1547 napi_enable(&rp->napi);
@@ -1581,16 +1653,15 @@ static int rhine_open(struct net_device *dev)
1581 void __iomem *ioaddr = rp->base; 1653 void __iomem *ioaddr = rp->base;
1582 int rc; 1654 int rc;
1583 1655
1584 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name, 1656 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1585 dev);
1586 if (rc) 1657 if (rc)
1587 return rc; 1658 return rc;
1588 1659
1589 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq); 1660 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1590 1661
1591 rc = alloc_ring(dev); 1662 rc = alloc_ring(dev);
1592 if (rc) { 1663 if (rc) {
1593 free_irq(rp->pdev->irq, dev); 1664 free_irq(rp->irq, dev);
1594 return rc; 1665 return rc;
1595 } 1666 }
1596 alloc_rbufs(dev); 1667 alloc_rbufs(dev);
@@ -1659,6 +1730,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1659 struct net_device *dev) 1730 struct net_device *dev)
1660{ 1731{
1661 struct rhine_private *rp = netdev_priv(dev); 1732 struct rhine_private *rp = netdev_priv(dev);
1733 struct device *hwdev = dev->dev.parent;
1662 void __iomem *ioaddr = rp->base; 1734 void __iomem *ioaddr = rp->base;
1663 unsigned entry; 1735 unsigned entry;
1664 1736
@@ -1695,9 +1767,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1695 rp->tx_bufs)); 1767 rp->tx_bufs));
1696 } else { 1768 } else {
1697 rp->tx_skbuff_dma[entry] = 1769 rp->tx_skbuff_dma[entry] =
1698 pci_map_single(rp->pdev, skb->data, skb->len, 1770 dma_map_single(hwdev, skb->data, skb->len,
1699 PCI_DMA_TODEVICE); 1771 DMA_TO_DEVICE);
1700 if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { 1772 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1701 dev_kfree_skb_any(skb); 1773 dev_kfree_skb_any(skb);
1702 rp->tx_skbuff_dma[entry] = 0; 1774 rp->tx_skbuff_dma[entry] = 0;
1703 dev->stats.tx_dropped++; 1775 dev->stats.tx_dropped++;
@@ -1788,6 +1860,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1788static void rhine_tx(struct net_device *dev) 1860static void rhine_tx(struct net_device *dev)
1789{ 1861{
1790 struct rhine_private *rp = netdev_priv(dev); 1862 struct rhine_private *rp = netdev_priv(dev);
1863 struct device *hwdev = dev->dev.parent;
1791 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; 1864 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1792 1865
1793 /* find and cleanup dirty tx descriptors */ 1866 /* find and cleanup dirty tx descriptors */
@@ -1831,10 +1904,10 @@ static void rhine_tx(struct net_device *dev)
1831 } 1904 }
1832 /* Free the original skb. */ 1905 /* Free the original skb. */
1833 if (rp->tx_skbuff_dma[entry]) { 1906 if (rp->tx_skbuff_dma[entry]) {
1834 pci_unmap_single(rp->pdev, 1907 dma_unmap_single(hwdev,
1835 rp->tx_skbuff_dma[entry], 1908 rp->tx_skbuff_dma[entry],
1836 rp->tx_skbuff[entry]->len, 1909 rp->tx_skbuff[entry]->len,
1837 PCI_DMA_TODEVICE); 1910 DMA_TO_DEVICE);
1838 } 1911 }
1839 dev_consume_skb_any(rp->tx_skbuff[entry]); 1912 dev_consume_skb_any(rp->tx_skbuff[entry]);
1840 rp->tx_skbuff[entry] = NULL; 1913 rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1936,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1863static int rhine_rx(struct net_device *dev, int limit) 1936static int rhine_rx(struct net_device *dev, int limit)
1864{ 1937{
1865 struct rhine_private *rp = netdev_priv(dev); 1938 struct rhine_private *rp = netdev_priv(dev);
1939 struct device *hwdev = dev->dev.parent;
1866 int count; 1940 int count;
1867 int entry = rp->cur_rx % RX_RING_SIZE; 1941 int entry = rp->cur_rx % RX_RING_SIZE;
1868 1942
@@ -1924,19 +1998,19 @@ static int rhine_rx(struct net_device *dev, int limit)
1924 if (pkt_len < rx_copybreak) 1998 if (pkt_len < rx_copybreak)
1925 skb = netdev_alloc_skb_ip_align(dev, pkt_len); 1999 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1926 if (skb) { 2000 if (skb) {
1927 pci_dma_sync_single_for_cpu(rp->pdev, 2001 dma_sync_single_for_cpu(hwdev,
1928 rp->rx_skbuff_dma[entry], 2002 rp->rx_skbuff_dma[entry],
1929 rp->rx_buf_sz, 2003 rp->rx_buf_sz,
1930 PCI_DMA_FROMDEVICE); 2004 DMA_FROM_DEVICE);
1931 2005
1932 skb_copy_to_linear_data(skb, 2006 skb_copy_to_linear_data(skb,
1933 rp->rx_skbuff[entry]->data, 2007 rp->rx_skbuff[entry]->data,
1934 pkt_len); 2008 pkt_len);
1935 skb_put(skb, pkt_len); 2009 skb_put(skb, pkt_len);
1936 pci_dma_sync_single_for_device(rp->pdev, 2010 dma_sync_single_for_device(hwdev,
1937 rp->rx_skbuff_dma[entry], 2011 rp->rx_skbuff_dma[entry],
1938 rp->rx_buf_sz, 2012 rp->rx_buf_sz,
1939 PCI_DMA_FROMDEVICE); 2013 DMA_FROM_DEVICE);
1940 } else { 2014 } else {
1941 skb = rp->rx_skbuff[entry]; 2015 skb = rp->rx_skbuff[entry];
1942 if (skb == NULL) { 2016 if (skb == NULL) {
@@ -1945,10 +2019,10 @@ static int rhine_rx(struct net_device *dev, int limit)
1945 } 2019 }
1946 rp->rx_skbuff[entry] = NULL; 2020 rp->rx_skbuff[entry] = NULL;
1947 skb_put(skb, pkt_len); 2021 skb_put(skb, pkt_len);
1948 pci_unmap_single(rp->pdev, 2022 dma_unmap_single(hwdev,
1949 rp->rx_skbuff_dma[entry], 2023 rp->rx_skbuff_dma[entry],
1950 rp->rx_buf_sz, 2024 rp->rx_buf_sz,
1951 PCI_DMA_FROMDEVICE); 2025 DMA_FROM_DEVICE);
1952 } 2026 }
1953 2027
1954 if (unlikely(desc_length & DescTag)) 2028 if (unlikely(desc_length & DescTag))
@@ -1979,10 +2053,11 @@ static int rhine_rx(struct net_device *dev, int limit)
1979 if (skb == NULL) 2053 if (skb == NULL)
1980 break; /* Better luck next round. */ 2054 break; /* Better luck next round. */
1981 rp->rx_skbuff_dma[entry] = 2055 rp->rx_skbuff_dma[entry] =
1982 pci_map_single(rp->pdev, skb->data, 2056 dma_map_single(hwdev, skb->data,
1983 rp->rx_buf_sz, 2057 rp->rx_buf_sz,
1984 PCI_DMA_FROMDEVICE); 2058 DMA_FROM_DEVICE);
1985 if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) { 2059 if (dma_mapping_error(hwdev,
2060 rp->rx_skbuff_dma[entry])) {
1986 dev_kfree_skb(skb); 2061 dev_kfree_skb(skb);
1987 rp->rx_skbuff_dma[entry] = 0; 2062 rp->rx_skbuff_dma[entry] = 0;
1988 break; 2063 break;
@@ -2103,7 +2178,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
2103 /* Too many to match, or accept all multicasts. */ 2178 /* Too many to match, or accept all multicasts. */
2104 iowrite32(0xffffffff, ioaddr + MulticastFilter0); 2179 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2105 iowrite32(0xffffffff, ioaddr + MulticastFilter1); 2180 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2106 } else if (rp->pdev->revision >= VT6105M) { 2181 } else if (rp->quirks & rqMgmt) {
2107 int i = 0; 2182 int i = 0;
2108 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */ 2183 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
2109 netdev_for_each_mc_addr(ha, dev) { 2184 netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2200,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
2125 iowrite32(mc_filter[1], ioaddr + MulticastFilter1); 2200 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2126 } 2201 }
2127 /* enable/disable VLAN receive filtering */ 2202 /* enable/disable VLAN receive filtering */
2128 if (rp->pdev->revision >= VT6105M) { 2203 if (rp->quirks & rqMgmt) {
2129 if (dev->flags & IFF_PROMISC) 2204 if (dev->flags & IFF_PROMISC)
2130 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); 2205 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2131 else 2206 else
@@ -2136,11 +2211,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
2136 2211
2137static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2212static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2138{ 2213{
2139 struct rhine_private *rp = netdev_priv(dev); 2214 struct device *hwdev = dev->dev.parent;
2140 2215
2141 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 2216 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2142 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 2217 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2143 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); 2218 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2144} 2219}
2145 2220
2146static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2221static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2352,7 @@ static int rhine_close(struct net_device *dev)
2277 /* Stop the chip's Tx and Rx processes. */ 2352 /* Stop the chip's Tx and Rx processes. */
2278 iowrite16(CmdStop, ioaddr + ChipCmd); 2353 iowrite16(CmdStop, ioaddr + ChipCmd);
2279 2354
2280 free_irq(rp->pdev->irq, dev); 2355 free_irq(rp->irq, dev);
2281 free_rbufs(dev); 2356 free_rbufs(dev);
2282 free_tbufs(dev); 2357 free_tbufs(dev);
2283 free_ring(dev); 2358 free_ring(dev);
@@ -2286,7 +2361,7 @@ static int rhine_close(struct net_device *dev)
2286} 2361}
2287 2362
2288 2363
2289static void rhine_remove_one(struct pci_dev *pdev) 2364static void rhine_remove_one_pci(struct pci_dev *pdev)
2290{ 2365{
2291 struct net_device *dev = pci_get_drvdata(pdev); 2366 struct net_device *dev = pci_get_drvdata(pdev);
2292 struct rhine_private *rp = netdev_priv(dev); 2367 struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2375,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
2300 pci_disable_device(pdev); 2375 pci_disable_device(pdev);
2301} 2376}
2302 2377
2303static void rhine_shutdown (struct pci_dev *pdev) 2378static int rhine_remove_one_platform(struct platform_device *pdev)
2379{
2380 struct net_device *dev = platform_get_drvdata(pdev);
2381 struct rhine_private *rp = netdev_priv(dev);
2382
2383 unregister_netdev(dev);
2384
2385 iounmap(rp->base);
2386
2387 free_netdev(dev);
2388
2389 return 0;
2390}
2391
2392static void rhine_shutdown_pci(struct pci_dev *pdev)
2304{ 2393{
2305 struct net_device *dev = pci_get_drvdata(pdev); 2394 struct net_device *dev = pci_get_drvdata(pdev);
2306 struct rhine_private *rp = netdev_priv(dev); 2395 struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2443,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
2354#ifdef CONFIG_PM_SLEEP 2443#ifdef CONFIG_PM_SLEEP
2355static int rhine_suspend(struct device *device) 2444static int rhine_suspend(struct device *device)
2356{ 2445{
2357 struct pci_dev *pdev = to_pci_dev(device); 2446 struct net_device *dev = dev_get_drvdata(device);
2358 struct net_device *dev = pci_get_drvdata(pdev);
2359 struct rhine_private *rp = netdev_priv(dev); 2447 struct rhine_private *rp = netdev_priv(dev);
2360 2448
2361 if (!netif_running(dev)) 2449 if (!netif_running(dev))
@@ -2367,23 +2455,21 @@ static int rhine_suspend(struct device *device)
2367 2455
2368 netif_device_detach(dev); 2456 netif_device_detach(dev);
2369 2457
2370 rhine_shutdown(pdev); 2458 if (dev_is_pci(device))
2459 rhine_shutdown_pci(to_pci_dev(device));
2371 2460
2372 return 0; 2461 return 0;
2373} 2462}
2374 2463
2375static int rhine_resume(struct device *device) 2464static int rhine_resume(struct device *device)
2376{ 2465{
2377 struct pci_dev *pdev = to_pci_dev(device); 2466 struct net_device *dev = dev_get_drvdata(device);
2378 struct net_device *dev = pci_get_drvdata(pdev);
2379 struct rhine_private *rp = netdev_priv(dev); 2467 struct rhine_private *rp = netdev_priv(dev);
2380 2468
2381 if (!netif_running(dev)) 2469 if (!netif_running(dev))
2382 return 0; 2470 return 0;
2383 2471
2384#ifdef USE_MMIO
2385 enable_mmio(rp->pioaddr, rp->quirks); 2472 enable_mmio(rp->pioaddr, rp->quirks);
2386#endif
2387 rhine_power_init(dev); 2473 rhine_power_init(dev);
2388 free_tbufs(dev); 2474 free_tbufs(dev);
2389 free_rbufs(dev); 2475 free_rbufs(dev);
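The PM rework above relies on the fact that pci_set_drvdata() and platform_set_drvdata() both store through dev_set_drvdata() on the embedded struct device, so a single set of dev_pm_ops can serve both buses, with the PCI-only power handling fenced off by dev_is_pci(). A sketch of the shape (rhine_shutdown_pci() is the function renamed later in this diff):

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Bus-agnostic suspend: recover the net_device from generic drvdata and
 * only touch PCI power state when the device really is PCI. */
static int bus_agnostic_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));
	return 0;
}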
@@ -2408,15 +2494,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2408 2494
2409#endif /* !CONFIG_PM_SLEEP */ 2495#endif /* !CONFIG_PM_SLEEP */
2410 2496
2411static struct pci_driver rhine_driver = { 2497static struct pci_driver rhine_driver_pci = {
2412 .name = DRV_NAME, 2498 .name = DRV_NAME,
2413 .id_table = rhine_pci_tbl, 2499 .id_table = rhine_pci_tbl,
2414 .probe = rhine_init_one, 2500 .probe = rhine_init_one_pci,
2415 .remove = rhine_remove_one, 2501 .remove = rhine_remove_one_pci,
2416 .shutdown = rhine_shutdown, 2502 .shutdown = rhine_shutdown_pci,
2417 .driver.pm = RHINE_PM_OPS, 2503 .driver.pm = RHINE_PM_OPS,
2418}; 2504};
2419 2505
2506static struct platform_driver rhine_driver_platform = {
2507 .probe = rhine_init_one_platform,
2508 .remove = rhine_remove_one_platform,
2509 .driver = {
2510 .name = DRV_NAME,
2511 .owner = THIS_MODULE,
2512 .of_match_table = rhine_of_tbl,
2513 .pm = RHINE_PM_OPS,
2514 }
2515};
2516
2420static struct dmi_system_id rhine_dmi_table[] __initdata = { 2517static struct dmi_system_id rhine_dmi_table[] __initdata = {
2421 { 2518 {
2422 .ident = "EPIA-M", 2519 .ident = "EPIA-M",
@@ -2437,6 +2534,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
2437 2534
2438static int __init rhine_init(void) 2535static int __init rhine_init(void)
2439{ 2536{
2537 int ret_pci, ret_platform;
2538
2440/* when a module, this is printed whether or not devices are found in probe */ 2539/* when a module, this is printed whether or not devices are found in probe */
2441#ifdef MODULE 2540#ifdef MODULE
2442 pr_info("%s\n", version); 2541 pr_info("%s\n", version);
@@ -2449,13 +2548,19 @@ static int __init rhine_init(void)
2449 else if (avoid_D3) 2548 else if (avoid_D3)
2450 pr_info("avoid_D3 set\n"); 2549 pr_info("avoid_D3 set\n");
2451 2550
2452 return pci_register_driver(&rhine_driver); 2551 ret_pci = pci_register_driver(&rhine_driver_pci);
2552 ret_platform = platform_driver_register(&rhine_driver_platform);
2553 if ((ret_pci < 0) && (ret_platform < 0))
2554 return ret_pci;
2555
2556 return 0;
2453} 2557}
2454 2558
2455 2559
2456static void __exit rhine_cleanup(void) 2560static void __exit rhine_cleanup(void)
2457{ 2561{
2458 pci_unregister_driver(&rhine_driver); 2562 platform_driver_unregister(&rhine_driver_platform);
2563 pci_unregister_driver(&rhine_driver_pci);
2459} 2564}
2460 2565
2461 2566
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index fa193c4688da..4ef818a7a6c6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
75 long end = jiffies + 2; 75 long end = jiffies + 2;
76 76
77 while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) { 77 while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
78 if (end - jiffies <= 0) { 78 if (time_before_eq(end, jiffies)) {
79 WARN_ON(1); 79 WARN_ON(1);
80 return -ETIMEDOUT; 80 return -ETIMEDOUT;
81 } 81 }
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 64b4639f43b6..d4abf478e2bb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
22 long end = jiffies + 2; 22 long end = jiffies + 2;
23 while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) & 23 while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
24 XAE_MDIO_MCR_READY_MASK)) { 24 XAE_MDIO_MCR_READY_MASK)) {
25 if (end - jiffies <= 0) { 25 if (time_before_eq(end, jiffies)) {
26 WARN_ON(1); 26 WARN_ON(1);
27 return -ETIMEDOUT; 27 return -ETIMEDOUT;
28 } 28 }
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0d87c67a5ff7..8c4aed3053eb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
702 */ 702 */
703 while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & 703 while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
704 XEL_MDIOCTRL_MDIOSTS_MASK) { 704 XEL_MDIOCTRL_MDIOSTS_MASK) {
705 if (end - jiffies <= 0) { 705 if (time_before_eq(end, jiffies)) {
706 WARN_ON(1); 706 WARN_ON(1);
707 return -ETIMEDOUT; 707 return -ETIMEDOUT;
708 } 708 }
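All three hunks above fix the same latent bug: end and jiffies are unsigned longs, so "end - jiffies <= 0" can only be true when the two are exactly equal, and the timeout is silently missed whenever the deadline tick is skipped over. time_before_eq() does the comparison in wraparound-safe signed arithmetic. A sketch of the resulting timeout idiom, with the two-tick budget these drivers use:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll a ready predicate against a wrap-safe deadline. */
static int wait_ready(bool (*ready)(void *ctx), void *ctx)
{
	unsigned long end = jiffies + 2;

	while (!ready(ctx)) {
		if (time_before_eq(end, jiffies))
			return -ETIMEDOUT;
	}
	return 0;
}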
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d18f711d0b0c..6cc37c15e0bf 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -28,50 +28,119 @@
28#include <linux/hyperv.h> 28#include <linux/hyperv.h>
29#include <linux/rndis.h> 29#include <linux/rndis.h>
30 30
31/* Fwd declaration */ 31/* RSS related */
32struct hv_netvsc_packet; 32#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
33struct ndis_tcp_ip_checksum_info; 33#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 /* query and set */
34 34
35/* Represent the xfer page packet which contains 1 or more netvsc packet */ 35#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
36struct xferpage_packet { 36#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
37 struct list_head list_ent;
38 u32 status;
39 37
40 /* # of netvsc packets this xfer packet contains */ 38#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
41 u32 count; 39#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
40
41struct ndis_obj_header {
42 u8 type;
43 u8 rev;
44 u16 size;
45} __packed;
46
47/* ndis_recv_scale_cap/cap_flag */
48#define NDIS_RSS_CAPS_MESSAGE_SIGNALED_INTERRUPTS 0x01000000
49#define NDIS_RSS_CAPS_CLASSIFICATION_AT_ISR 0x02000000
50#define NDIS_RSS_CAPS_CLASSIFICATION_AT_DPC 0x04000000
51#define NDIS_RSS_CAPS_USING_MSI_X 0x08000000
52#define NDIS_RSS_CAPS_RSS_AVAILABLE_ON_PORTS 0x10000000
53#define NDIS_RSS_CAPS_SUPPORTS_MSI_X 0x20000000
54#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV4 0x00000100
55#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6 0x00000200
56#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6_EX 0x00000400
57
58struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
59 struct ndis_obj_header hdr;
60 u32 cap_flag;
61 u32 num_int_msg;
62 u32 num_recv_que;
63 u16 num_indirect_tabent;
64} __packed;
65
66
67/* ndis_recv_scale_param flags */
68#define NDIS_RSS_PARAM_FLAG_BASE_CPU_UNCHANGED 0x0001
69#define NDIS_RSS_PARAM_FLAG_HASH_INFO_UNCHANGED 0x0002
70#define NDIS_RSS_PARAM_FLAG_ITABLE_UNCHANGED 0x0004
71#define NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED 0x0008
72#define NDIS_RSS_PARAM_FLAG_DISABLE_RSS 0x0010
73
74/* Hash info bits */
75#define NDIS_HASH_FUNC_TOEPLITZ 0x00000001
76#define NDIS_HASH_IPV4 0x00000100
77#define NDIS_HASH_TCP_IPV4 0x00000200
78#define NDIS_HASH_IPV6 0x00000400
79#define NDIS_HASH_IPV6_EX 0x00000800
80#define NDIS_HASH_TCP_IPV6 0x00001000
81#define NDIS_HASH_TCP_IPV6_EX 0x00002000
82
83#define NDIS_RSS_INDIRECTION_TABLE_MAX_SIZE_REVISION_2 (128 * 4)
84#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
85
86#define ITAB_NUM 128
87#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
88extern u8 netvsc_hash_key[];
89
90struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
91 struct ndis_obj_header hdr;
92
93 /* Qualifies the rest of the information */
94 u16 flag;
95
97 /* The base CPU number to do receive processing. Not used. */
97 u16 base_cpu_number;
98
99 /* This describes the hash function and type being enabled */
100 u32 hashinfo;
101
102 /* The size of indirection table array */
103 u16 indirect_tabsize;
104
105 /* The offset of the indirection table from the beginning of this
106 * structure
107 */
108 u32 indirect_taboffset;
109
110 /* The size of the hash secret key */
111 u16 hashkey_size;
112
113 /* The offset of the secret key from the beginning of this structure */
114 u32 kashkey_offset;
115
116 u32 processor_masks_offset;
117 u32 num_processor_masks;
118 u32 processor_masks_entry_size;
42}; 119};
43 120
121/* Fwd declaration */
122struct ndis_tcp_ip_checksum_info;
123
44/* 124/*
45 * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame 125 * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
46 * within the RNDIS 126 * within the RNDIS
47 */ 127 */
48struct hv_netvsc_packet { 128struct hv_netvsc_packet {
49 /* Bookkeeping stuff */ 129 /* Bookkeeping stuff */
50 struct list_head list_ent;
51 u32 status; 130 u32 status;
52 131
53 struct hv_device *device; 132 struct hv_device *device;
54 bool is_data_pkt; 133 bool is_data_pkt;
55 u16 vlan_tci; 134 u16 vlan_tci;
56 135
57 /* 136 u16 q_idx;
58 * Valid only for receives when we break a xfer page packet 137 struct vmbus_channel *channel;
59 * into multiple netvsc packets
60 */
61 struct xferpage_packet *xfer_page_pkt;
62 138
63 union { 139 u64 send_completion_tid;
64 struct { 140 void *send_completion_ctx;
65 u64 recv_completion_tid; 141 void (*send_completion)(void *context);
66 void *recv_completion_ctx; 142
67 void (*recv_completion)(void *context); 143 u32 send_buf_index;
68 } recv;
69 struct {
70 u64 send_completion_tid;
71 void *send_completion_ctx;
72 void (*send_completion)(void *context);
73 } send;
74 } completion;
75 144
76 /* This points to the memory after page_buf */ 145 /* This points to the memory after page_buf */
77 struct rndis_message *rndis_msg; 146 struct rndis_message *rndis_msg;
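The host consumes struct ndis_recv_scale_param with the indirection table and hash key appended directly after it, located via the offset fields. A sketch of that layout, assuming the definitions from the header above are in scope; the hashinfo bits chosen are an example, and kashkey_offset is spelled as the header spells it:

#include <linux/stddef.h>
#include <linux/string.h>

#include "hyperv_net.h"

/* Hypothetical container: table and key trail the parameter block. */
struct rss_msg {
	struct ndis_recv_scale_param param;
	u32 itab[ITAB_NUM];		/* indirection table */
	u8 key[HASH_KEYLEN];		/* Toeplitz secret key */
} __packed;

static void rss_msg_init(struct rss_msg *m)
{
	memset(m, 0, sizeof(*m));
	m->param.hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
	m->param.hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
	m->param.hdr.size = sizeof(*m);
	m->param.hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_TCP_IPV4;
	m->param.indirect_tabsize = sizeof(m->itab);
	m->param.indirect_taboffset = offsetof(struct rss_msg, itab);
	m->param.hashkey_size = sizeof(m->key);
	m->param.kashkey_offset = offsetof(struct rss_msg, key);
}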
@@ -120,6 +189,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
120int netvsc_recv_callback(struct hv_device *device_obj, 189int netvsc_recv_callback(struct hv_device *device_obj,
121 struct hv_netvsc_packet *packet, 190 struct hv_netvsc_packet *packet,
122 struct ndis_tcp_ip_checksum_info *csum_info); 191 struct ndis_tcp_ip_checksum_info *csum_info);
192void netvsc_channel_cb(void *context);
123int rndis_filter_open(struct hv_device *dev); 193int rndis_filter_open(struct hv_device *dev);
124int rndis_filter_close(struct hv_device *dev); 194int rndis_filter_close(struct hv_device *dev);
125int rndis_filter_device_add(struct hv_device *dev, 195int rndis_filter_device_add(struct hv_device *dev,
@@ -514,14 +584,16 @@ struct nvsp_message {
514 584
515#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ 585#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
516#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ 586#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
587#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024) /* 1MB */
588#define NETVSC_INVALID_INDEX -1
517 589
518#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
519 590
520/* Preallocated receive packets */ 591#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
521#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
522 592
523#define NETVSC_PACKET_SIZE 2048 593#define NETVSC_PACKET_SIZE 2048
524 594
595#define VRSS_SEND_TAB_SIZE 16
596
525/* Per netvsc channel-specific */ 597/* Per netvsc channel-specific */
526struct netvsc_device { 598struct netvsc_device {
527 struct hv_device *dev; 599 struct hv_device *dev;
@@ -532,12 +604,6 @@ struct netvsc_device {
532 wait_queue_head_t wait_drain; 604 wait_queue_head_t wait_drain;
533 bool start_remove; 605 bool start_remove;
534 bool destroy; 606 bool destroy;
535 /*
536 * List of free preallocated hv_netvsc_packet to represent receive
537 * packet
538 */
539 struct list_head recv_pkt_list;
540 spinlock_t recv_pkt_list_lock;
541 607
542 /* Receive buffer allocated by us but managed by NetVSP */ 608 /* Receive buffer allocated by us but managed by NetVSP */
543 void *recv_buf; 609 void *recv_buf;
@@ -546,6 +612,15 @@ struct netvsc_device {
546 u32 recv_section_cnt; 612 u32 recv_section_cnt;
547 struct nvsp_1_receive_buffer_section *recv_section; 613 struct nvsp_1_receive_buffer_section *recv_section;
548 614
615 /* Send buffer allocated by us */
616 void *send_buf;
617 u32 send_buf_size;
618 u32 send_buf_gpadl_handle;
619 u32 send_section_cnt;
620 u32 send_section_size;
621 unsigned long *send_section_map;
622 int map_words;
623
549 /* Used for NetVSP initialization protocol */ 624 /* Used for NetVSP initialization protocol */
550 struct completion channel_init_wait; 625 struct completion channel_init_wait;
551 struct nvsp_message channel_init_pkt; 626 struct nvsp_message channel_init_pkt;
@@ -555,10 +630,20 @@ struct netvsc_device {
555 630
556 struct net_device *ndev; 631 struct net_device *ndev;
557 632
633 struct vmbus_channel *chn_table[NR_CPUS];
634 u32 send_table[VRSS_SEND_TAB_SIZE];
635 u32 num_chn;
636 atomic_t queue_sends[NR_CPUS];
637
558 /* Holds rndis device info */ 638 /* Holds rndis device info */
559 void *extension; 639 void *extension;
560 /* The recive buffer for this device */ 640
641 int ring_size;
642
643 /* The primary channel callback buffer */
561 unsigned char cb_buffer[NETVSC_PACKET_SIZE]; 644 unsigned char cb_buffer[NETVSC_PACKET_SIZE];
645 /* The sub channel callback buffer */
646 unsigned char *sub_cb_buf;
562}; 647};
563 648
564/* NdisInitialize message */ 649/* NdisInitialize message */
@@ -706,6 +791,7 @@ enum ndis_per_pkt_info_type {
706 IEEE_8021Q_INFO, 791 IEEE_8021Q_INFO,
707 ORIGINAL_PKTINFO, 792 ORIGINAL_PKTINFO,
708 PACKET_CANCEL_ID, 793 PACKET_CANCEL_ID,
794 NBL_HASH_VALUE = PACKET_CANCEL_ID,
709 ORIGINAL_NET_BUFLIST, 795 ORIGINAL_NET_BUFLIST,
710 CACHED_NET_BUFLIST, 796 CACHED_NET_BUFLIST,
711 SHORT_PKT_PADINFO, 797 SHORT_PKT_PADINFO,
@@ -852,6 +938,9 @@ struct ndis_tcp_lso_info {
852#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \ 938#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
853 sizeof(struct ndis_tcp_lso_info)) 939 sizeof(struct ndis_tcp_lso_info))
854 940
941#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
942 sizeof(u32))
943
855/* Format of Information buffer passed in a SetRequest for the OID */ 944/* Format of Information buffer passed in a SetRequest for the OID */
856/* OID_GEN_RNDIS_CONFIG_PARAMETER. */ 945/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
857struct rndis_config_parameter_info { 946struct rndis_config_parameter_info {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f7629ecefa84..c041f63a6d30 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <asm/sync_bitops.h>
31 32
32#include "hyperv_net.h" 33#include "hyperv_net.h"
33 34
@@ -80,7 +81,7 @@ get_in_err:
80} 81}
81 82
82 83
83static int netvsc_destroy_recv_buf(struct netvsc_device *net_device) 84static int netvsc_destroy_buf(struct netvsc_device *net_device)
84{ 85{
85 struct nvsp_message *revoke_packet; 86 struct nvsp_message *revoke_packet;
86 int ret = 0; 87 int ret = 0;
@@ -146,10 +147,62 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
146 net_device->recv_section = NULL; 147 net_device->recv_section = NULL;
147 } 148 }
148 149
150 /* Deal with the send buffer we may have set up.
151 * If we got a send section size, it means we received a
152 * SendSendBufferComplete msg (ie we sent a
153 * NvspMessage1TypeSendSendBuffer msg), therefore we need
154 * to send a revoke msg here.
155 */
156 if (net_device->send_section_size) {
157 /* Send the revoke send buffer */
158 revoke_packet = &net_device->revoke_packet;
159 memset(revoke_packet, 0, sizeof(struct nvsp_message));
160
161 revoke_packet->hdr.msg_type =
162 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
163 revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
164
165 ret = vmbus_sendpacket(net_device->dev->channel,
166 revoke_packet,
167 sizeof(struct nvsp_message),
168 (unsigned long)revoke_packet,
169 VM_PKT_DATA_INBAND, 0);
170 /* If we failed here, we might as well return and
171 * have a leak rather than continue and hit a bugcheck
172 */
173 if (ret != 0) {
174 netdev_err(ndev, "unable to send "
175 "revoke send buffer to netvsp\n");
176 return ret;
177 }
178 }
179 /* Teardown the gpadl on the vsp end */
180 if (net_device->send_buf_gpadl_handle) {
181 ret = vmbus_teardown_gpadl(net_device->dev->channel,
182 net_device->send_buf_gpadl_handle);
183
184 /* If we failed here, we might as well return and have a leak
185 * rather than continue and hit a bugcheck
186 */
187 if (ret != 0) {
188 netdev_err(ndev,
189 "unable to teardown send buffer's gpadl\n");
190 return ret;
191 }
192 net_device->send_buf_gpadl_handle = 0;
193 }
194 if (net_device->send_buf) {
195 /* Free up the send buffer */
196 free_pages((unsigned long)net_device->send_buf,
197 get_order(net_device->send_buf_size));
198 net_device->send_buf = NULL;
199 }
200 kfree(net_device->send_section_map);
201
149 return ret; 202 return ret;
150} 203}
151 204
152static int netvsc_init_recv_buf(struct hv_device *device) 205static int netvsc_init_buf(struct hv_device *device)
153{ 206{
154 int ret = 0; 207 int ret = 0;
155 int t; 208 int t;
@@ -248,10 +301,90 @@ static int netvsc_init_recv_buf(struct hv_device *device)
248 goto cleanup; 301 goto cleanup;
249 } 302 }
250 303
304 /* Now set up the send buffer.
305 */
306 net_device->send_buf =
307 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
308 get_order(net_device->send_buf_size));
309 if (!net_device->send_buf) {
310 netdev_err(ndev, "unable to allocate send "
311 "buffer of size %d\n", net_device->send_buf_size);
312 ret = -ENOMEM;
313 goto cleanup;
314 }
315
316 /* Establish the gpadl handle for this buffer on this
317 * channel. Note: This call uses the vmbus connection rather
318 * than the channel to establish the gpadl handle.
319 */
320 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
321 net_device->send_buf_size,
322 &net_device->send_buf_gpadl_handle);
323 if (ret != 0) {
324 netdev_err(ndev,
325 "unable to establish send buffer's gpadl\n");
326 goto cleanup;
327 }
328
329 /* Notify the NetVsp of the gpadl handle */
330 init_packet = &net_device->channel_init_pkt;
331 memset(init_packet, 0, sizeof(struct nvsp_message));
332 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
333 init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
334 net_device->send_buf_gpadl_handle;
335 init_packet->msg.v1_msg.send_recv_buf.id = 0;
336
337 /* Send the gpadl notification request */
338 ret = vmbus_sendpacket(device->channel, init_packet,
339 sizeof(struct nvsp_message),
340 (unsigned long)init_packet,
341 VM_PKT_DATA_INBAND,
342 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
343 if (ret != 0) {
344 netdev_err(ndev,
345 "unable to send send buffer's gpadl to netvsp\n");
346 goto cleanup;
347 }
348
349 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
350 BUG_ON(t == 0);
351
352 /* Check the response */
353 if (init_packet->msg.v1_msg.
354 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
355 netdev_err(ndev, "Unable to complete send buffer "
356 "initialization with NetVsp - status %d\n",
357 init_packet->msg.v1_msg.
358 send_recv_buf_complete.status);
359 ret = -EINVAL;
360 goto cleanup;
361 }
362
363 /* Parse the response */
364 net_device->send_section_size = init_packet->msg.
365 v1_msg.send_send_buf_complete.section_size;
366
367 /* Section count is simply the size divided by the section size.
368 */
369 net_device->send_section_cnt =
370 net_device->send_buf_size/net_device->send_section_size;
371
372 dev_info(&device->device, "Send section size: %d, Section count:%d\n",
373 net_device->send_section_size, net_device->send_section_cnt);
374
375 /* Setup state for managing the send buffer. */
376 net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
377 BITS_PER_LONG);
378
379 net_device->send_section_map =
380 kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
381 if (net_device->send_section_map == NULL)
382 goto cleanup;
383
251 goto exit; 384 goto exit;
252 385
253cleanup: 386cleanup:
254 netvsc_destroy_recv_buf(net_device); 387 netvsc_destroy_buf(net_device);
255 388
256exit: 389exit:
257 return ret; 390 return ret;
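The bookkeeping set up above is cheap: the host picks the section size, and the guest needs one bitmap bit per section. A worked example of the sizing math, assuming the host reports 6144-byte sections (the value is host-chosen; 6144 is only an illustration):

#include <linux/bitops.h>
#include <linux/kernel.h>

/* 1 MB / 6144-byte sections = 170 sections;
 * DIV_ROUND_UP(170, 64) = 3 longs, i.e. a 24-byte map on 64-bit. */
static u32 send_map_words(u32 send_buf_size, u32 send_section_size)
{
	u32 section_cnt = send_buf_size / send_section_size;

	return DIV_ROUND_UP(section_cnt, BITS_PER_LONG);
}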
@@ -369,8 +502,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
369 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; 502 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
370 else 503 else
371 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; 504 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
505 net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
372 506
373 ret = netvsc_init_recv_buf(device); 507 ret = netvsc_init_buf(device);
374 508
375cleanup: 509cleanup:
376 return ret; 510 return ret;
@@ -378,7 +512,7 @@ cleanup:
378 512
379static void netvsc_disconnect_vsp(struct netvsc_device *net_device) 513static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
380{ 514{
381 netvsc_destroy_recv_buf(net_device); 515 netvsc_destroy_buf(net_device);
382} 516}
383 517
384/* 518/*
@@ -387,7 +521,6 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
387int netvsc_device_remove(struct hv_device *device) 521int netvsc_device_remove(struct hv_device *device)
388{ 522{
389 struct netvsc_device *net_device; 523 struct netvsc_device *net_device;
390 struct hv_netvsc_packet *netvsc_packet, *pos;
391 unsigned long flags; 524 unsigned long flags;
392 525
393 net_device = hv_get_drvdata(device); 526 net_device = hv_get_drvdata(device);
@@ -416,11 +549,8 @@ int netvsc_device_remove(struct hv_device *device)
416 vmbus_close(device->channel); 549 vmbus_close(device->channel);
417 550
418 /* Release all resources */ 551 /* Release all resources */
419 list_for_each_entry_safe(netvsc_packet, pos, 552 if (net_device->sub_cb_buf)
420 &net_device->recv_pkt_list, list_ent) { 553 vfree(net_device->sub_cb_buf);
421 list_del(&netvsc_packet->list_ent);
422 kfree(netvsc_packet);
423 }
424 554
425 kfree(net_device); 555 kfree(net_device);
426 return 0; 556 return 0;
@@ -444,6 +574,12 @@ static inline u32 hv_ringbuf_avail_percent(
444 return avail_write * 100 / ring_info->ring_datasize; 574 return avail_write * 100 / ring_info->ring_datasize;
445} 575}
446 576
577static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
578 u32 index)
579{
580 sync_change_bit(index, net_device->send_section_map);
581}
582
447static void netvsc_send_completion(struct netvsc_device *net_device, 583static void netvsc_send_completion(struct netvsc_device *net_device,
448 struct hv_device *device, 584 struct hv_device *device,
449 struct vmpacket_descriptor *packet) 585 struct vmpacket_descriptor *packet)
@@ -451,6 +587,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
451 struct nvsp_message *nvsp_packet; 587 struct nvsp_message *nvsp_packet;
452 struct hv_netvsc_packet *nvsc_packet; 588 struct hv_netvsc_packet *nvsc_packet;
453 struct net_device *ndev; 589 struct net_device *ndev;
590 u32 send_index;
454 591
455 ndev = net_device->ndev; 592 ndev = net_device->ndev;
456 593
@@ -461,7 +598,9 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
461 (nvsp_packet->hdr.msg_type == 598 (nvsp_packet->hdr.msg_type ==
462 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) || 599 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
463 (nvsp_packet->hdr.msg_type == 600 (nvsp_packet->hdr.msg_type ==
464 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) { 601 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
602 (nvsp_packet->hdr.msg_type ==
603 NVSP_MSG5_TYPE_SUBCHANNEL)) {
465 /* Copy the response back */ 604 /* Copy the response back */
466 memcpy(&net_device->channel_init_pkt, nvsp_packet, 605 memcpy(&net_device->channel_init_pkt, nvsp_packet,
467 sizeof(struct nvsp_message)); 606 sizeof(struct nvsp_message));
@@ -469,28 +608,39 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
469 } else if (nvsp_packet->hdr.msg_type == 608 } else if (nvsp_packet->hdr.msg_type ==
470 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { 609 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
471 int num_outstanding_sends; 610 int num_outstanding_sends;
611 u16 q_idx = 0;
612 struct vmbus_channel *channel = device->channel;
613 int queue_sends;
472 614
473 /* Get the send context */ 615 /* Get the send context */
474 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) 616 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
475 packet->trans_id; 617 packet->trans_id;
476 618
477 /* Notify the layer above us */ 619 /* Notify the layer above us */
478 if (nvsc_packet) 620 if (nvsc_packet) {
479 nvsc_packet->completion.send.send_completion( 621 send_index = nvsc_packet->send_buf_index;
480 nvsc_packet->completion.send. 622 if (send_index != NETVSC_INVALID_INDEX)
481 send_completion_ctx); 623 netvsc_free_send_slot(net_device, send_index);
624 q_idx = nvsc_packet->q_idx;
625 channel = nvsc_packet->channel;
626 nvsc_packet->send_completion(nvsc_packet->
627 send_completion_ctx);
628 }
482 629
483 num_outstanding_sends = 630 num_outstanding_sends =
484 atomic_dec_return(&net_device->num_outstanding_sends); 631 atomic_dec_return(&net_device->num_outstanding_sends);
632 queue_sends = atomic_dec_return(&net_device->
633 queue_sends[q_idx]);
485 634
486 if (net_device->destroy && num_outstanding_sends == 0) 635 if (net_device->destroy && num_outstanding_sends == 0)
487 wake_up(&net_device->wait_drain); 636 wake_up(&net_device->wait_drain);
488 637
489 if (netif_queue_stopped(ndev) && !net_device->start_remove && 638 if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
490 (hv_ringbuf_avail_percent(&device->channel->outbound) 639 !net_device->start_remove &&
491 > RING_AVAIL_PERCENT_HIWATER || 640 (hv_ringbuf_avail_percent(&channel->outbound) >
492 num_outstanding_sends < 1)) 641 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
493 netif_wake_queue(ndev); 642 netif_tx_wake_queue(netdev_get_tx_queue(
643 ndev, q_idx));
494 } else { 644 } else {
495 netdev_err(ndev, "Unknown send completion packet type- " 645 netdev_err(ndev, "Unknown send completion packet type- "
496 "%d received!!\n", nvsp_packet->hdr.msg_type); 646 "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -498,6 +648,52 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
498 648
499} 649}
500 650
651static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
652{
653 unsigned long index;
654 u32 max_words = net_device->map_words;
655 unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
656 u32 section_cnt = net_device->send_section_cnt;
657 int ret_val = NETVSC_INVALID_INDEX;
658 int i;
659 int prev_val;
660
661 for (i = 0; i < max_words; i++) {
662 if (!~(map_addr[i]))
663 continue;
664 index = ffz(map_addr[i]);
665 prev_val = sync_test_and_set_bit(index, &map_addr[i]);
666 if (prev_val)
667 continue;
668 if ((index + (i * BITS_PER_LONG)) >= section_cnt)
669 break;
670 ret_val = (index + (i * BITS_PER_LONG));
671 break;
672 }
673 return ret_val;
674}
675
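netvsc_get_next_send_section() above claims a section without taking a lock: ffz() proposes the first zero bit in a word and sync_test_and_set_bit() claims it atomically, falling through to the next word if another CPU wins. A generic sketch of the same technique with the portable bitmap helpers; unlike the function above it rescans on a lost race instead of skipping ahead:

#include <linux/bitops.h>

/* Claim the first free slot in a shared bitmap, lock-free; returns the
 * slot index, or -1 when every slot is taken. */
static int claim_free_slot(unsigned long *map, unsigned int nbits)
{
	unsigned int bit;

	do {
		bit = find_first_zero_bit(map, nbits);
		if (bit >= nbits)
			return -1;
	} while (test_and_set_bit(bit, map));	/* lost the race: rescan */

	return bit;
}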
676u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
677 unsigned int section_index,
678 struct hv_netvsc_packet *packet)
679{
680 char *start = net_device->send_buf;
681 char *dest = (start + (section_index * net_device->send_section_size));
682 int i;
683 u32 msg_size = 0;
684
685 for (i = 0; i < packet->page_buf_cnt; i++) {
686 char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
687 u32 offset = packet->page_buf[i].offset;
688 u32 len = packet->page_buf[i].len;
689
690 memcpy(dest, (src + offset), len);
691 msg_size += len;
692 dest += len;
693 }
694 return msg_size;
695}
696
501int netvsc_send(struct hv_device *device, 697int netvsc_send(struct hv_device *device,
502 struct hv_netvsc_packet *packet) 698 struct hv_netvsc_packet *packet)
503{ 699{
@@ -505,7 +701,12 @@ int netvsc_send(struct hv_device *device,
505 int ret = 0; 701 int ret = 0;
506 struct nvsp_message sendMessage; 702 struct nvsp_message sendMessage;
507 struct net_device *ndev; 703 struct net_device *ndev;
704 struct vmbus_channel *out_channel = NULL;
508 u64 req_id; 705 u64 req_id;
706 unsigned int section_index = NETVSC_INVALID_INDEX;
707 u32 msg_size = 0;
708 struct sk_buff *skb;
709
509 710
510 net_device = get_outbound_net_device(device); 711 net_device = get_outbound_net_device(device);
511 if (!net_device) 712 if (!net_device)
@@ -521,25 +722,46 @@ int netvsc_send(struct hv_device *device,
521 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1; 722 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
522 } 723 }
523 724
524 /* Not using send buffer section */ 725 /* Attempt to send via sendbuf */
726 if (packet->total_data_buflen < net_device->send_section_size) {
727 section_index = netvsc_get_next_send_section(net_device);
728 if (section_index != NETVSC_INVALID_INDEX) {
729 msg_size = netvsc_copy_to_send_buf(net_device,
730 section_index,
731 packet);
732 skb = (struct sk_buff *)
733 (unsigned long)packet->send_completion_tid;
734 if (skb)
735 dev_kfree_skb_any(skb);
736 packet->page_buf_cnt = 0;
737 }
738 }
739 packet->send_buf_index = section_index;
740
741
525 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index = 742 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
526 0xFFFFFFFF; 743 section_index;
527 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; 744 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
528 745
529 if (packet->completion.send.send_completion) 746 if (packet->send_completion)
530 req_id = (ulong)packet; 747 req_id = (ulong)packet;
531 else 748 else
532 req_id = 0; 749 req_id = 0;
533 750
751 out_channel = net_device->chn_table[packet->q_idx];
752 if (out_channel == NULL)
753 out_channel = device->channel;
754 packet->channel = out_channel;
755
534 if (packet->page_buf_cnt) { 756 if (packet->page_buf_cnt) {
535 ret = vmbus_sendpacket_pagebuffer(device->channel, 757 ret = vmbus_sendpacket_pagebuffer(out_channel,
536 packet->page_buf, 758 packet->page_buf,
537 packet->page_buf_cnt, 759 packet->page_buf_cnt,
538 &sendMessage, 760 &sendMessage,
539 sizeof(struct nvsp_message), 761 sizeof(struct nvsp_message),
540 req_id); 762 req_id);
541 } else { 763 } else {
542 ret = vmbus_sendpacket(device->channel, &sendMessage, 764 ret = vmbus_sendpacket(out_channel, &sendMessage,
543 sizeof(struct nvsp_message), 765 sizeof(struct nvsp_message),
544 req_id, 766 req_id,
545 VM_PKT_DATA_INBAND, 767 VM_PKT_DATA_INBAND,
@@ -548,17 +770,24 @@ int netvsc_send(struct hv_device *device,
548 770
549 if (ret == 0) { 771 if (ret == 0) {
550 atomic_inc(&net_device->num_outstanding_sends); 772 atomic_inc(&net_device->num_outstanding_sends);
551 if (hv_ringbuf_avail_percent(&device->channel->outbound) < 773 atomic_inc(&net_device->queue_sends[packet->q_idx]);
774
775 if (hv_ringbuf_avail_percent(&out_channel->outbound) <
552 RING_AVAIL_PERCENT_LOWATER) { 776 RING_AVAIL_PERCENT_LOWATER) {
553 netif_stop_queue(ndev); 777 netif_tx_stop_queue(netdev_get_tx_queue(
778 ndev, packet->q_idx));
779
554 if (atomic_read(&net_device-> 780 if (atomic_read(&net_device->
555 num_outstanding_sends) < 1) 781 queue_sends[packet->q_idx]) < 1)
556 netif_wake_queue(ndev); 782 netif_tx_wake_queue(netdev_get_tx_queue(
783 ndev, packet->q_idx));
557 } 784 }
558 } else if (ret == -EAGAIN) { 785 } else if (ret == -EAGAIN) {
559 netif_stop_queue(ndev); 786 netif_tx_stop_queue(netdev_get_tx_queue(
560 if (atomic_read(&net_device->num_outstanding_sends) < 1) { 787 ndev, packet->q_idx));
561 netif_wake_queue(ndev); 788 if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
789 netif_tx_wake_queue(netdev_get_tx_queue(
790 ndev, packet->q_idx));
562 ret = -ENOSPC; 791 ret = -ENOSPC;
563 } 792 }
564 } else { 793 } else {
@@ -570,6 +799,7 @@ int netvsc_send(struct hv_device *device,
570} 799}
571 800
572static void netvsc_send_recv_completion(struct hv_device *device, 801static void netvsc_send_recv_completion(struct hv_device *device,
802 struct vmbus_channel *channel,
573 struct netvsc_device *net_device, 803 struct netvsc_device *net_device,
574 u64 transaction_id, u32 status) 804 u64 transaction_id, u32 status)
575{ 805{
@@ -587,7 +817,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
587 817
588retry_send_cmplt: 818retry_send_cmplt:
589 /* Send the completion */ 819 /* Send the completion */
590 ret = vmbus_sendpacket(device->channel, &recvcompMessage, 820 ret = vmbus_sendpacket(channel, &recvcompMessage,
591 sizeof(struct nvsp_message), transaction_id, 821 sizeof(struct nvsp_message), transaction_id,
592 VM_PKT_COMP, 0); 822 VM_PKT_COMP, 0);
593 if (ret == 0) { 823 if (ret == 0) {
@@ -613,76 +843,20 @@ retry_send_cmplt:
613 } 843 }
614} 844}
615 845
616/* Send a receive completion packet to RNDIS device (ie NetVsp) */
617static void netvsc_receive_completion(void *context)
618{
619 struct hv_netvsc_packet *packet = context;
620 struct hv_device *device = packet->device;
621 struct netvsc_device *net_device;
622 u64 transaction_id = 0;
623 bool fsend_receive_comp = false;
624 unsigned long flags;
625 struct net_device *ndev;
626 u32 status = NVSP_STAT_NONE;
627
628 /*
629 * Even though it seems logical to do a GetOutboundNetDevice() here to
630 * send out receive completion, we are using GetInboundNetDevice()
631 * since we may have disable outbound traffic already.
632 */
633 net_device = get_inbound_net_device(device);
634 if (!net_device)
635 return;
636 ndev = net_device->ndev;
637
638 /* Overloading use of the lock. */
639 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
640
641 if (packet->status != NVSP_STAT_SUCCESS)
642 packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
643
644 packet->xfer_page_pkt->count--;
645
646 /*
647 * Last one in the line that represent 1 xfer page packet.
648 * Return the xfer page packet itself to the freelist
649 */
650 if (packet->xfer_page_pkt->count == 0) {
651 fsend_receive_comp = true;
652 transaction_id = packet->completion.recv.recv_completion_tid;
653 status = packet->xfer_page_pkt->status;
654 list_add_tail(&packet->xfer_page_pkt->list_ent,
655 &net_device->recv_pkt_list);
656
657 }
658
659 /* Put the packet back */
660 list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
661 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
662
663 /* Send a receive completion for the xfer page packet */
664 if (fsend_receive_comp)
665 netvsc_send_recv_completion(device, net_device, transaction_id,
666 status);
667
668}
669
670static void netvsc_receive(struct netvsc_device *net_device, 846static void netvsc_receive(struct netvsc_device *net_device,
847 struct vmbus_channel *channel,
671 struct hv_device *device, 848 struct hv_device *device,
672 struct vmpacket_descriptor *packet) 849 struct vmpacket_descriptor *packet)
673{ 850{
674 struct vmtransfer_page_packet_header *vmxferpage_packet; 851 struct vmtransfer_page_packet_header *vmxferpage_packet;
675 struct nvsp_message *nvsp_packet; 852 struct nvsp_message *nvsp_packet;
676 struct hv_netvsc_packet *netvsc_packet = NULL; 853 struct hv_netvsc_packet nv_pkt;
677 /* struct netvsc_driver *netvscDriver; */ 854 struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
678 struct xferpage_packet *xferpage_packet = NULL; 855 u32 status = NVSP_STAT_SUCCESS;
679 int i; 856 int i;
680 int count = 0; 857 int count = 0;
681 unsigned long flags;
682 struct net_device *ndev; 858 struct net_device *ndev;
683 859
684 LIST_HEAD(listHead);
685
686 ndev = net_device->ndev; 860 ndev = net_device->ndev;
687 861
688 /* 862 /*
@@ -715,77 +889,14 @@ static void netvsc_receive(struct netvsc_device *net_device,
715 return; 889 return;
716 } 890 }
717 891
718 /* 892 count = vmxferpage_packet->range_cnt;
719 * Grab free packets (range count + 1) to represent this xfer 893 netvsc_packet->device = device;
720 * page packet. +1 to represent the xfer page packet itself. 894 netvsc_packet->channel = channel;
721 * We grab it here so that we know exactly how many we can
722 * fulfil
723 */
724 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
725 while (!list_empty(&net_device->recv_pkt_list)) {
726 list_move_tail(net_device->recv_pkt_list.next, &listHead);
727 if (++count == vmxferpage_packet->range_cnt + 1)
728 break;
729 }
730 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
731
732 /*
733 * We need at least 2 netvsc pkts (1 to represent the xfer
734 * page and at least 1 for the range) i.e. we can handled
735 * some of the xfer page packet ranges...
736 */
737 if (count < 2) {
738 netdev_err(ndev, "Got only %d netvsc pkt...needed "
739 "%d pkts. Dropping this xfer page packet completely!\n",
740 count, vmxferpage_packet->range_cnt + 1);
741
742 /* Return it to the freelist */
743 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
744 for (i = count; i != 0; i--) {
745 list_move_tail(listHead.next,
746 &net_device->recv_pkt_list);
747 }
748 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
749 flags);
750
751 netvsc_send_recv_completion(device, net_device,
752 vmxferpage_packet->d.trans_id,
753 NVSP_STAT_FAIL);
754
755 return;
756 }
757
758 /* Remove the 1st packet to represent the xfer page packet itself */
759 xferpage_packet = (struct xferpage_packet *)listHead.next;
760 list_del(&xferpage_packet->list_ent);
761 xferpage_packet->status = NVSP_STAT_SUCCESS;
762
763 /* This is how much we can satisfy */
764 xferpage_packet->count = count - 1;
765
766 if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
767 netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
768 "this xfer page...got %d\n",
769 vmxferpage_packet->range_cnt, xferpage_packet->count);
770 }
771 895
772 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ 896 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
773 for (i = 0; i < (count - 1); i++) { 897 for (i = 0; i < count; i++) {
774 netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
775 list_del(&netvsc_packet->list_ent);
776
777 /* Initialize the netvsc packet */ 898 /* Initialize the netvsc packet */
778 netvsc_packet->status = NVSP_STAT_SUCCESS; 899 netvsc_packet->status = NVSP_STAT_SUCCESS;
779 netvsc_packet->xfer_page_pkt = xferpage_packet;
780 netvsc_packet->completion.recv.recv_completion =
781 netvsc_receive_completion;
782 netvsc_packet->completion.recv.recv_completion_ctx =
783 netvsc_packet;
784 netvsc_packet->device = device;
785 /* Save this so that we can send it back */
786 netvsc_packet->completion.recv.recv_completion_tid =
787 vmxferpage_packet->d.trans_id;
788
789 netvsc_packet->data = (void *)((unsigned long)net_device-> 900 netvsc_packet->data = (void *)((unsigned long)net_device->
790 recv_buf + vmxferpage_packet->ranges[i].byte_offset); 901 recv_buf + vmxferpage_packet->ranges[i].byte_offset);
791 netvsc_packet->total_data_buflen = 902 netvsc_packet->total_data_buflen =
@@ -794,16 +905,53 @@ static void netvsc_receive(struct netvsc_device *net_device,
794 /* Pass it to the upper layer */ 905 /* Pass it to the upper layer */
795 rndis_filter_receive(device, netvsc_packet); 906 rndis_filter_receive(device, netvsc_packet);
796 907
797 netvsc_receive_completion(netvsc_packet-> 908 if (netvsc_packet->status != NVSP_STAT_SUCCESS)
798 completion.recv.recv_completion_ctx); 909 status = NVSP_STAT_FAIL;
910 }
911
912 netvsc_send_recv_completion(device, channel, net_device,
913 vmxferpage_packet->d.trans_id, status);
914}
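
The rewritten netvsc_receive() above no longer borrows packets from a preallocated recv_pkt_list: each transfer-page range is handed to rndis_filter_receive() in place, any per-range failure is folded into a single status word, and exactly one receive completion is posted for the whole transfer-page packet. A minimal userspace sketch of that control flow (consume_range() and the final printf stand in for the RNDIS filter and the VMBus completion; they are illustrative, not kernel APIs):

#include <stdio.h>

#define NVSP_STAT_SUCCESS 1
#define NVSP_STAT_FAIL    2

/* stands in for rndis_filter_receive(); pretend range 3 fails */
static int consume_range(int i)
{
	return (i == 3) ? NVSP_STAT_FAIL : NVSP_STAT_SUCCESS;
}

int main(void)
{
	int count = 8, status = NVSP_STAT_SUCCESS;

	/* process every range in place, folding failures into one status */
	for (int i = 0; i < count; i++)
		if (consume_range(i) != NVSP_STAT_SUCCESS)
			status = NVSP_STAT_FAIL;

	/* one completion for the whole transfer-page packet */
	printf("send_recv_completion(status=%d)\n", status);
	return 0;
}
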
915
916
917static void netvsc_send_table(struct hv_device *hdev,
918 struct vmpacket_descriptor *vmpkt)
919{
920 struct netvsc_device *nvscdev;
921 struct net_device *ndev;
922 struct nvsp_message *nvmsg;
923 int i;
924 u32 count, *tab;
925
926 nvscdev = get_outbound_net_device(hdev);
927 if (!nvscdev)
928 return;
929 ndev = nvscdev->ndev;
930
931 nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
932 (vmpkt->offset8 << 3));
933
934 if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
935 return;
936
937 count = nvmsg->msg.v5_msg.send_table.count;
938 if (count != VRSS_SEND_TAB_SIZE) {
939 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
940 return;
799 } 941 }
800 942
943 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
944 nvmsg->msg.v5_msg.send_table.offset);
945
946 for (i = 0; i < count; i++)
947 nvscdev->send_table[i] = tab[i];
801} 948}
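
NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE carries a count plus a byte offset to an array of u32 entries, both relative to the send_table sub-message; the handler rejects any table whose count differs from VRSS_SEND_TAB_SIZE before copying it. A compilable sketch of that offset-based parsing, with a made-up wire struct and TAB_SIZE standing in for the kernel constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TAB_SIZE 16			/* stands in for VRSS_SEND_TAB_SIZE */

struct send_table_msg {			/* illustrative wire layout */
	uint32_t count;
	uint32_t offset;		/* byte offset of entries[] from msg start */
	uint32_t entries[TAB_SIZE];
};

static uint32_t send_table[TAB_SIZE];

static int parse_send_table(const struct send_table_msg *msg)
{
	const uint32_t *tab;

	if (msg->count != TAB_SIZE)	/* reject unexpected sizes, as the driver does */
		return -1;

	tab = (const uint32_t *)((const char *)msg + msg->offset);
	for (uint32_t i = 0; i < msg->count; i++)
		send_table[i] = tab[i];
	return 0;
}

int main(void)
{
	struct send_table_msg msg = {
		.count  = TAB_SIZE,
		.offset = offsetof(struct send_table_msg, entries),
	};

	for (int i = 0; i < TAB_SIZE; i++)
		msg.entries[i] = i % 4;	/* spread hash buckets over 4 queues */

	printf("parse: %d, send_table[5]=%u\n",
	       parse_send_table(&msg), send_table[5]);
	return 0;
}
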
802 949
803static void netvsc_channel_cb(void *context) 950void netvsc_channel_cb(void *context)
804{ 951{
805 int ret; 952 int ret;
806 struct hv_device *device = context; 953 struct vmbus_channel *channel = (struct vmbus_channel *)context;
954 struct hv_device *device;
807 struct netvsc_device *net_device; 955 struct netvsc_device *net_device;
808 u32 bytes_recvd; 956 u32 bytes_recvd;
809 u64 request_id; 957 u64 request_id;
@@ -812,14 +960,19 @@ static void netvsc_channel_cb(void *context)
812 int bufferlen = NETVSC_PACKET_SIZE; 960 int bufferlen = NETVSC_PACKET_SIZE;
813 struct net_device *ndev; 961 struct net_device *ndev;
814 962
963 if (channel->primary_channel != NULL)
964 device = channel->primary_channel->device_obj;
965 else
966 device = channel->device_obj;
967
815 net_device = get_inbound_net_device(device); 968 net_device = get_inbound_net_device(device);
816 if (!net_device) 969 if (!net_device)
817 return; 970 return;
818 ndev = net_device->ndev; 971 ndev = net_device->ndev;
819 buffer = net_device->cb_buffer; 972 buffer = get_per_channel_state(channel);
820 973
821 do { 974 do {
822 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen, 975 ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
823 &bytes_recvd, &request_id); 976 &bytes_recvd, &request_id);
824 if (ret == 0) { 977 if (ret == 0) {
825 if (bytes_recvd > 0) { 978 if (bytes_recvd > 0) {
@@ -831,8 +984,12 @@ static void netvsc_channel_cb(void *context)
831 break; 984 break;
832 985
833 case VM_PKT_DATA_USING_XFER_PAGES: 986 case VM_PKT_DATA_USING_XFER_PAGES:
834 netvsc_receive(net_device, 987 netvsc_receive(net_device, channel,
835 device, desc); 988 device, desc);
989 break;
990
991 case VM_PKT_DATA_INBAND:
992 netvsc_send_table(device, desc);
836 break; 993 break;
837 994
838 default: 995 default:
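
netvsc_channel_cb() now receives the vmbus channel itself rather than the hv_device. Subchannels carry no device object of their own, so the callback walks back to the primary channel when one exists. A toy model of that resolution rule (struct names mirror the kernel's, but the code is illustrative):

#include <stdio.h>

struct hv_device { const char *name; };

struct vmbus_channel {
	struct vmbus_channel *primary_channel;	/* NULL on the primary itself */
	struct hv_device *device_obj;		/* set only on the primary */
};

static struct hv_device *channel_to_device(struct vmbus_channel *c)
{
	return c->primary_channel ? c->primary_channel->device_obj
				  : c->device_obj;
}

int main(void)
{
	struct hv_device dev = { "netvsc0" };
	struct vmbus_channel primary = { NULL, &dev };
	struct vmbus_channel sub = { &primary, NULL };

	printf("%s %s\n", channel_to_device(&primary)->name,
	       channel_to_device(&sub)->name);
	return 0;
}
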
@@ -880,11 +1037,9 @@ static void netvsc_channel_cb(void *context)
880int netvsc_device_add(struct hv_device *device, void *additional_info) 1037int netvsc_device_add(struct hv_device *device, void *additional_info)
881{ 1038{
882 int ret = 0; 1039 int ret = 0;
883 int i;
884 int ring_size = 1040 int ring_size =
885 ((struct netvsc_device_info *)additional_info)->ring_size; 1041 ((struct netvsc_device_info *)additional_info)->ring_size;
886 struct netvsc_device *net_device; 1042 struct netvsc_device *net_device;
887 struct hv_netvsc_packet *packet, *pos;
888 struct net_device *ndev; 1043 struct net_device *ndev;
889 1044
890 net_device = alloc_net_device(device); 1045 net_device = alloc_net_device(device);
@@ -893,6 +1048,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
893 goto cleanup; 1048 goto cleanup;
894 } 1049 }
895 1050
1051 net_device->ring_size = ring_size;
1052
896 /* 1053 /*
897 * Coming into this function, struct net_device * is 1054 * Coming into this function, struct net_device * is
898 * registered as the driver private data. 1055 * registered as the driver private data.
@@ -903,24 +1060,14 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
903 ndev = net_device->ndev; 1060 ndev = net_device->ndev;
904 1061
905 /* Initialize the NetVSC channel extension */ 1062 /* Initialize the NetVSC channel extension */
906 spin_lock_init(&net_device->recv_pkt_list_lock);
907
908 INIT_LIST_HEAD(&net_device->recv_pkt_list);
909
910 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
911 packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
912 if (!packet)
913 break;
914
915 list_add_tail(&packet->list_ent,
916 &net_device->recv_pkt_list);
917 }
918 init_completion(&net_device->channel_init_wait); 1063 init_completion(&net_device->channel_init_wait);
919 1064
1065 set_per_channel_state(device->channel, net_device->cb_buffer);
1066
920 /* Open the channel */ 1067 /* Open the channel */
921 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, 1068 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
922 ring_size * PAGE_SIZE, NULL, 0, 1069 ring_size * PAGE_SIZE, NULL, 0,
923 netvsc_channel_cb, device); 1070 netvsc_channel_cb, device->channel);
924 1071
925 if (ret != 0) { 1072 if (ret != 0) {
926 netdev_err(ndev, "unable to open channel: %d\n", ret); 1073 netdev_err(ndev, "unable to open channel: %d\n", ret);
@@ -930,6 +1077,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
930 /* Channel is opened */ 1077 /* Channel is opened */
931 pr_info("hv_netvsc channel opened successfully\n"); 1078 pr_info("hv_netvsc channel opened successfully\n");
932 1079
1080 net_device->chn_table[0] = device->channel;
1081
933 /* Connect with the NetVsp */ 1082 /* Connect with the NetVsp */
934 ret = netvsc_connect_vsp(device); 1083 ret = netvsc_connect_vsp(device);
935 if (ret != 0) { 1084 if (ret != 0) {
@@ -946,16 +1095,8 @@ close:
946 1095
947cleanup: 1096cleanup:
948 1097
949 if (net_device) { 1098 if (net_device)
950 list_for_each_entry_safe(packet, pos,
951 &net_device->recv_pkt_list,
952 list_ent) {
953 list_del(&packet->list_ent);
954 kfree(packet);
955 }
956
957 kfree(net_device); 1099 kfree(net_device);
958 }
959 1100
960 return ret; 1101 return ret;
961} 1102}
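
Because one callback is now shared by every channel, each channel needs its own receive scratch buffer; it is attached with set_per_channel_state() before vmbus_open() and fetched with get_per_channel_state() inside the callback, replacing the old single per-device cb_buffer. A userspace sketch of the pattern, with set_state()/get_state() as stand-ins:

#include <stdio.h>

struct channel {
	void *per_channel_state;	/* models set/get_per_channel_state() */
};

static void set_state(struct channel *c, void *s) { c->per_channel_state = s; }
static void *get_state(struct channel *c) { return c->per_channel_state; }

static void channel_cb(struct channel *c)
{
	char *buf = get_state(c);	/* each channel reads into its own buffer */
	snprintf(buf, 32, "served by %p", (void *)c);
}

int main(void)
{
	static char buf0[32], buf1[32];
	struct channel c0, c1;

	set_state(&c0, buf0);		/* attach state before "opening" */
	set_state(&c1, buf1);
	channel_cb(&c0);
	channel_cb(&c1);
	printf("%s / %s\n", buf0, buf1);
	return 0;
}
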
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7918d5132c1f..4fd71b75e666 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -101,7 +101,7 @@ static int netvsc_open(struct net_device *net)
101 return ret; 101 return ret;
102 } 102 }
103 103
104 netif_start_queue(net); 104 netif_tx_start_all_queues(net);
105 105
106 nvdev = hv_get_drvdata(device_obj); 106 nvdev = hv_get_drvdata(device_obj);
107 rdev = nvdev->extension; 107 rdev = nvdev->extension;
@@ -149,15 +149,100 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
149 return ppi; 149 return ppi;
150} 150}
151 151
152union sub_key {
153 u64 k;
154 struct {
155 u8 pad[3];
156 u8 kb;
157 u32 ka;
158 };
159};
160
161/* Toeplitz hash function
162 * data: network byte order
163 * return: host byte order
164 */
165static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
166{
167 union sub_key subk;
168 int k_next = 4;
169 u8 dt;
170 int i, j;
171 u32 ret = 0;
172
173 subk.k = 0;
174 subk.ka = ntohl(*(u32 *)key);
175
176 for (i = 0; i < dlen; i++) {
177 subk.kb = key[k_next];
178 k_next = (k_next + 1) % klen;
179 dt = data[i];
180 for (j = 0; j < 8; j++) {
181 if (dt & 0x80)
182 ret ^= subk.ka;
183 dt <<= 1;
184 subk.k <<= 1;
185 }
186 }
187
188 return ret;
189}
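
comp_hash() is a compact Toeplitz hash: a 32-bit window slides over the key one bit per input bit, and the window is XORed into the result whenever the current input bit is set. The union trick merely keeps the next key byte just below the window so a single 64-bit shift feeds it in. A plain restatement with the same in-tree key, minus the union (equivalent under the same byte/bit ordering; the printed value is illustrative, not a verified test vector):

#include <stdint.h>
#include <stdio.h>

static const uint8_t key[40] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};

static uint32_t toeplitz(const uint8_t *data, int dlen)
{
	uint32_t hash = 0;
	/* 32-bit window over key bits 0..31, MSB-first */
	uint32_t window = ((uint32_t)key[0] << 24) | (key[1] << 16) |
			  (key[2] << 8) | key[3];

	for (int i = 0; i < dlen; i++) {
		for (int b = 0; b < 8; b++) {
			if (data[i] & (0x80 >> b))
				hash ^= window;
			/* slide: pull in key bit (8*i + b + 32) */
			int nb = 8 * i + b + 32;
			window = (window << 1) |
				 ((key[(nb / 8) % sizeof(key)] >> (7 - nb % 8)) & 1);
		}
	}
	return hash;
}

int main(void)
{
	/* saddr, daddr, sport, dport as they appear on the wire */
	uint8_t tuple[12] = { 192, 168, 1, 10, 192, 168, 1, 20,
			      0x1f, 0x90, 0xc3, 0x50 };

	printf("hash = 0x%08x\n", toeplitz(tuple, sizeof(tuple)));
	return 0;
}
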
190
191static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
192{
193 struct iphdr *iphdr;
194 int data_len;
195 bool ret = false;
196
197 if (eth_hdr(skb)->h_proto != htons(ETH_P_IP))
198 return false;
199
200 iphdr = ip_hdr(skb);
201
202 if (iphdr->version == 4) {
203 if (iphdr->protocol == IPPROTO_TCP)
204 data_len = 12;
205 else
206 data_len = 8;
207 *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN,
208 (u8 *)&iphdr->saddr, data_len);
209 ret = true;
210 }
211
212 return ret;
213}
214
215static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
216 void *accel_priv, select_queue_fallback_t fallback)
217{
218 struct net_device_context *net_device_ctx = netdev_priv(ndev);
219 struct hv_device *hdev = net_device_ctx->device_ctx;
220 struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
221 u32 hash;
222 u16 q_idx = 0;
223
224 if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
225 return 0;
226
227 if (netvsc_set_hash(&hash, skb)) {
228 q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
229 ndev->real_num_tx_queues;
230 skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
231 }
232
233 return q_idx;
234}
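
netvsc_select_queue() composes two mappings: the Toeplitz hash picks a slot in the host-supplied send_table, and the slot value is then reduced modulo the actual number of TX queues. A sketch of the arithmetic (VRSS_SEND_TAB_SIZE here is illustrative, and the identity table stands in for whatever the host sent):

#include <stdint.h>
#include <stdio.h>

#define VRSS_SEND_TAB_SIZE 16	/* illustrative stand-in for the driver's table size */

static uint16_t select_queue(uint32_t hash, const uint32_t *send_table,
			     unsigned int real_num_tx_queues)
{
	/* hash -> table slot -> usable queue index */
	return send_table[hash % VRSS_SEND_TAB_SIZE] % real_num_tx_queues;
}

int main(void)
{
	uint32_t tab[VRSS_SEND_TAB_SIZE];

	for (int i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		tab[i] = i;	/* identity table from the host */

	printf("q = %u\n", select_queue(0x12345678u, tab, 4));
	return 0;
}
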
235
152static void netvsc_xmit_completion(void *context) 236static void netvsc_xmit_completion(void *context)
153{ 237{
154 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context; 238 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
155 struct sk_buff *skb = (struct sk_buff *) 239 struct sk_buff *skb = (struct sk_buff *)
156 (unsigned long)packet->completion.send.send_completion_tid; 240 (unsigned long)packet->send_completion_tid;
241 u32 index = packet->send_buf_index;
157 242
158 kfree(packet); 243 kfree(packet);
159 244
160 if (skb) 245 if (skb && (index == NETVSC_INVALID_INDEX))
161 dev_kfree_skb_any(skb); 246 dev_kfree_skb_any(skb);
162} 247}
163 248
@@ -301,6 +386,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
301 struct ndis_tcp_lso_info *lso_info; 386 struct ndis_tcp_lso_info *lso_info;
302 int hdr_offset; 387 int hdr_offset;
303 u32 net_trans_info; 388 u32 net_trans_info;
389 u32 hash;
304 390
305 391
306 /* We will at most need two pages to describe the rndis 392
@@ -319,9 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
319 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 405 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
320 (num_data_pgs * sizeof(struct hv_page_buffer)) + 406 (num_data_pgs * sizeof(struct hv_page_buffer)) +
321 sizeof(struct rndis_message) + 407 sizeof(struct rndis_message) +
322 NDIS_VLAN_PPI_SIZE + 408 NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
323 NDIS_CSUM_PPI_SIZE + 409 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
324 NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
325 if (!packet) { 410 if (!packet) {
326 /* out of memory, drop packet */ 411 /* out of memory, drop packet */
327 netdev_err(net, "unable to allocate hv_netvsc_packet\n"); 412 netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -333,6 +418,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
333 418
334 packet->vlan_tci = skb->vlan_tci; 419 packet->vlan_tci = skb->vlan_tci;
335 420
421 packet->q_idx = skb_get_queue_mapping(skb);
422
336 packet->is_data_pkt = true; 423 packet->is_data_pkt = true;
337 packet->total_data_buflen = skb->len; 424 packet->total_data_buflen = skb->len;
338 425
@@ -341,9 +428,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
341 (num_data_pgs * sizeof(struct hv_page_buffer))); 428 (num_data_pgs * sizeof(struct hv_page_buffer)));
342 429
343 /* Set the completion routine */ 430 /* Set the completion routine */
344 packet->completion.send.send_completion = netvsc_xmit_completion; 431 packet->send_completion = netvsc_xmit_completion;
345 packet->completion.send.send_completion_ctx = packet; 432 packet->send_completion_ctx = packet;
346 packet->completion.send.send_completion_tid = (unsigned long)skb; 433 packet->send_completion_tid = (unsigned long)skb;
347 434
348 isvlan = packet->vlan_tci & VLAN_TAG_PRESENT; 435 isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
349 436
@@ -358,6 +445,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
358 445
359 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet); 446 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
360 447
448 hash = skb_get_hash_raw(skb);
449 if (hash != 0 && net->real_num_tx_queues > 1) {
450 rndis_msg_size += NDIS_HASH_PPI_SIZE;
451 ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
452 NBL_HASH_VALUE);
453 *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
454 }
455
361 if (isvlan) { 456 if (isvlan) {
362 struct ndis_pkt_8021q_info *vlan; 457 struct ndis_pkt_8021q_info *vlan;
363 458
@@ -558,6 +653,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
558 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 653 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
559 packet->vlan_tci); 654 packet->vlan_tci);
560 655
656 skb_record_rx_queue(skb, packet->channel->
657 offermsg.offer.sub_channel_index);
658
561 net->stats.rx_packets++; 659 net->stats.rx_packets++;
562 net->stats.rx_bytes += packet->total_data_buflen; 660 net->stats.rx_bytes += packet->total_data_buflen;
563 661
@@ -606,7 +704,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
606 hv_set_drvdata(hdev, ndev); 704 hv_set_drvdata(hdev, ndev);
607 device_info.ring_size = ring_size; 705 device_info.ring_size = ring_size;
608 rndis_filter_device_add(hdev, &device_info); 706 rndis_filter_device_add(hdev, &device_info);
609 netif_wake_queue(ndev); 707 netif_tx_wake_all_queues(ndev);
610 708
611 return 0; 709 return 0;
612} 710}
@@ -652,6 +750,7 @@ static const struct net_device_ops device_ops = {
652 .ndo_change_mtu = netvsc_change_mtu, 750 .ndo_change_mtu = netvsc_change_mtu,
653 .ndo_validate_addr = eth_validate_addr, 751 .ndo_validate_addr = eth_validate_addr,
654 .ndo_set_mac_address = netvsc_set_mac_addr, 752 .ndo_set_mac_address = netvsc_set_mac_addr,
753 .ndo_select_queue = netvsc_select_queue,
655}; 754};
656 755
657/* 756/*
@@ -698,9 +797,11 @@ static int netvsc_probe(struct hv_device *dev,
698 struct net_device *net = NULL; 797 struct net_device *net = NULL;
699 struct net_device_context *net_device_ctx; 798 struct net_device_context *net_device_ctx;
700 struct netvsc_device_info device_info; 799 struct netvsc_device_info device_info;
800 struct netvsc_device *nvdev;
701 int ret; 801 int ret;
702 802
703 net = alloc_etherdev(sizeof(struct net_device_context)); 803 net = alloc_etherdev_mq(sizeof(struct net_device_context),
804 num_online_cpus());
704 if (!net) 805 if (!net)
705 return -ENOMEM; 806 return -ENOMEM;
706 807
@@ -719,7 +820,7 @@ static int netvsc_probe(struct hv_device *dev,
719 net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM | 820 net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
720 NETIF_F_IP_CSUM | NETIF_F_TSO; 821 NETIF_F_IP_CSUM | NETIF_F_TSO;
721 822
722 SET_ETHTOOL_OPS(net, &ethtool_ops); 823 net->ethtool_ops = &ethtool_ops;
723 SET_NETDEV_DEV(net, &dev->device); 824 SET_NETDEV_DEV(net, &dev->device);
724 825
725 /* Notify the netvsc driver of the new device */ 826 /* Notify the netvsc driver of the new device */
@@ -733,6 +834,10 @@ static int netvsc_probe(struct hv_device *dev,
733 } 834 }
734 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 835 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
735 836
837 nvdev = hv_get_drvdata(dev);
838 netif_set_real_num_tx_queues(net, nvdev->num_chn);
839 netif_set_real_num_rx_queues(net, nvdev->num_chn);
840
736 ret = register_netdev(net); 841 ret = register_netdev(net);
737 if (ret != 0) { 842 if (ret != 0) {
738 pr_err("Unable to register netdev.\n"); 843 pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 143a98caf618..99c527adae5b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,7 +31,7 @@
31#include "hyperv_net.h" 31#include "hyperv_net.h"
32 32
33 33
34#define RNDIS_EXT_LEN 100 34#define RNDIS_EXT_LEN PAGE_SIZE
35struct rndis_request { 35struct rndis_request {
36 struct list_head list_ent; 36 struct list_head list_ent;
37 struct completion wait_event; 37 struct completion wait_event;
@@ -94,6 +94,8 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
94 rndis_msg->ndis_msg_type = msg_type; 94 rndis_msg->ndis_msg_type = msg_type;
95 rndis_msg->msg_len = msg_len; 95 rndis_msg->msg_len = msg_len;
96 96
97 request->pkt.q_idx = 0;
98
97 /* 99 /*
98 * Set the request id. This field is always after the rndis header for 100 * Set the request id. This field is always after the rndis header for
99 * request/response packet types so we just used the SetRequest as a 101 * request/response packet types so we just used the SetRequest as a
@@ -234,7 +236,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
234 packet->page_buf[0].len; 236 packet->page_buf[0].len;
235 } 237 }
236 238
237 packet->completion.send.send_completion = NULL; 239 packet->send_completion = NULL;
238 240
239 ret = netvsc_send(dev->net_dev->dev, packet); 241 ret = netvsc_send(dev->net_dev->dev, packet);
240 return ret; 242 return ret;
@@ -399,8 +401,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
399 pkt->total_data_buflen = rndis_pkt->data_len; 401 pkt->total_data_buflen = rndis_pkt->data_len;
400 pkt->data = (void *)((unsigned long)pkt->data + data_offset); 402 pkt->data = (void *)((unsigned long)pkt->data + data_offset);
401 403
402 pkt->is_data_pkt = true;
403
404 vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); 404 vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
405 if (vlan) { 405 if (vlan) {
406 pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid | 406 pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
@@ -509,6 +509,19 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
509 query->info_buflen = 0; 509 query->info_buflen = 0;
510 query->dev_vc_handle = 0; 510 query->dev_vc_handle = 0;
511 511
512 if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
513 struct ndis_recv_scale_cap *cap;
514
515 request->request_msg.msg_len +=
516 sizeof(struct ndis_recv_scale_cap);
517 query->info_buflen = sizeof(struct ndis_recv_scale_cap);
518 cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
519 query->info_buf_offset);
520 cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
521 cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
522 cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
523 }
524
512 ret = rndis_filter_send_request(dev, request); 525 ret = rndis_filter_send_request(dev, request);
513 if (ret != 0) 526 if (ret != 0)
514 goto cleanup; 527 goto cleanup;
@@ -695,6 +708,89 @@ cleanup:
695 return ret; 708 return ret;
696} 709}
697 710
711u8 netvsc_hash_key[HASH_KEYLEN] = {
712 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
713 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
714 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
715 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
716 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
717};
718
719int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
720{
721 struct net_device *ndev = rdev->net_dev->ndev;
722 struct rndis_request *request;
723 struct rndis_set_request *set;
724 struct rndis_set_complete *set_complete;
725 u32 extlen = sizeof(struct ndis_recv_scale_param) +
726 4*ITAB_NUM + HASH_KEYLEN;
727 struct ndis_recv_scale_param *rssp;
728 u32 *itab;
729 u8 *keyp;
730 int i, t, ret;
731
732 request = get_rndis_request(
733 rdev, RNDIS_MSG_SET,
734 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
735 if (!request)
736 return -ENOMEM;
737
738 set = &request->request_msg.msg.set_req;
739 set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
740 set->info_buflen = extlen;
741 set->info_buf_offset = sizeof(struct rndis_set_request);
742 set->dev_vc_handle = 0;
743
744 rssp = (struct ndis_recv_scale_param *)(set + 1);
745 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
746 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
747 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
748 rssp->flag = 0;
749 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
750 NDIS_HASH_TCP_IPV4;
751 rssp->indirect_tabsize = 4*ITAB_NUM;
752 rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
753 rssp->hashkey_size = HASH_KEYLEN;
754 rssp->kashkey_offset = rssp->indirect_taboffset +
755 rssp->indirect_tabsize;
756
757 /* Set indirection table entries */
758 itab = (u32 *)(rssp + 1);
759 for (i = 0; i < ITAB_NUM; i++)
760 itab[i] = i % num_queue;
761
762 /* Set hash key values */
763 keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
764 for (i = 0; i < HASH_KEYLEN; i++)
765 keyp[i] = netvsc_hash_key[i];
766
767
768 ret = rndis_filter_send_request(rdev, request);
769 if (ret != 0)
770 goto cleanup;
771
772 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
773 if (t == 0) {
774 netdev_err(ndev, "timeout before we got a set response...\n");
775 /* can't put_rndis_request, since we may still receive a
776 * send-completion.
777 */
778 return -ETIMEDOUT;
779 } else {
780 set_complete = &request->response_msg.msg.set_complete;
781 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
782 netdev_err(ndev, "Failed to set RSS parameters: 0x%x\n",
783 set_complete->status);
784 ret = -EINVAL;
785 }
786 }
787
788cleanup:
789 put_rndis_request(rdev, request);
790 return ret;
791}
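
rndis_filter_set_rss_param() packs three variable-length pieces behind the set request: the scale-parameters header, 4*ITAB_NUM bytes of indirection table, and the 40-byte hash key, each located by a byte offset from the header. A compilable model of the layout arithmetic, with a simplified stand-in header (the real ndis_recv_scale_param has more fields, and the constants are assumed to match the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ITAB_NUM    128			/* assumed kernel value */
#define HASH_KEYLEN 40

struct scale_param {			/* stands in for ndis_recv_scale_param */
	uint32_t tab_offset;		/* byte offset from this header */
	uint32_t tab_size;
	uint32_t key_offset;
	uint32_t key_size;
};

#define EXTLEN (sizeof(struct scale_param) + 4 * ITAB_NUM + HASH_KEYLEN)

int main(void)
{
	uint32_t storage[(EXTLEN + 3) / 4];	/* keep 32-bit alignment */
	uint8_t *buf = (uint8_t *)storage;
	struct scale_param *p = (struct scale_param *)buf;
	uint32_t *itab;
	int num_queue = 4;

	p->tab_offset = sizeof(*p);
	p->tab_size   = 4 * ITAB_NUM;
	p->key_offset = p->tab_offset + p->tab_size;
	p->key_size   = HASH_KEYLEN;

	itab = (uint32_t *)(buf + p->tab_offset);
	for (int i = 0; i < ITAB_NUM; i++)
		itab[i] = i % num_queue;	/* spread table slots round-robin */

	memset(buf + p->key_offset, 0, HASH_KEYLEN); /* driver copies netvsc_hash_key here */

	printf("info_buflen = %zu\n", (size_t)EXTLEN);
	return 0;
}
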
792
793
698static int rndis_filter_query_device_link_status(struct rndis_device *dev) 794static int rndis_filter_query_device_link_status(struct rndis_device *dev)
699{ 795{
700 u32 size = sizeof(u32); 796 u32 size = sizeof(u32);
@@ -886,6 +982,28 @@ static int rndis_filter_close_device(struct rndis_device *dev)
886 return ret; 982 return ret;
887} 983}
888 984
985static void netvsc_sc_open(struct vmbus_channel *new_sc)
986{
987 struct netvsc_device *nvscdev;
988 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
989 int ret;
990
991 nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
992
993 if (chn_index >= nvscdev->num_chn)
994 return;
995
996 set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
997 NETVSC_PACKET_SIZE);
998
999 ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
1000 nvscdev->ring_size * PAGE_SIZE, NULL, 0,
1001 netvsc_channel_cb, new_sc);
1002
1003 if (ret == 0)
1004 nvscdev->chn_table[chn_index] = new_sc;
1005}
1006
889int rndis_filter_device_add(struct hv_device *dev, 1007int rndis_filter_device_add(struct hv_device *dev,
890 void *additional_info) 1008 void *additional_info)
891{ 1009{
@@ -894,6 +1012,10 @@ int rndis_filter_device_add(struct hv_device *dev,
894 struct rndis_device *rndis_device; 1012 struct rndis_device *rndis_device;
895 struct netvsc_device_info *device_info = additional_info; 1013 struct netvsc_device_info *device_info = additional_info;
896 struct ndis_offload_params offloads; 1014 struct ndis_offload_params offloads;
1015 struct nvsp_message *init_packet;
1016 int t;
1017 struct ndis_recv_scale_cap rsscap;
1018 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
897 1019
898 rndis_device = get_rndis_device(); 1020 rndis_device = get_rndis_device();
899 if (!rndis_device) 1021 if (!rndis_device)
@@ -913,6 +1035,7 @@ int rndis_filter_device_add(struct hv_device *dev,
913 1035
914 /* Initialize the rndis device */ 1036 /* Initialize the rndis device */
915 net_device = hv_get_drvdata(dev); 1037 net_device = hv_get_drvdata(dev);
1038 net_device->num_chn = 1;
916 1039
917 net_device->extension = rndis_device; 1040 net_device->extension = rndis_device;
918 rndis_device->net_dev = net_device; 1041 rndis_device->net_dev = net_device;
@@ -952,7 +1075,6 @@ int rndis_filter_device_add(struct hv_device *dev,
952 if (ret) 1075 if (ret)
953 goto err_dev_remv; 1076 goto err_dev_remv;
954 1077
955
956 rndis_filter_query_device_link_status(rndis_device); 1078 rndis_filter_query_device_link_status(rndis_device);
957 1079
958 device_info->link_state = rndis_device->link_state; 1080 device_info->link_state = rndis_device->link_state;
@@ -961,7 +1083,66 @@ int rndis_filter_device_add(struct hv_device *dev,
961 rndis_device->hw_mac_adr, 1083 rndis_device->hw_mac_adr,
962 device_info->link_state ? "down" : "up"); 1084 device_info->link_state ? "down" : "up");
963 1085
964 return ret; 1086 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1087 return 0;
1088
1089 /* vRSS setup */
1090 memset(&rsscap, 0, rsscap_size);
1091 ret = rndis_filter_query_device(rndis_device,
1092 OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1093 &rsscap, &rsscap_size);
1094 if (ret || rsscap.num_recv_que < 2)
1095 goto out;
1096
1097 net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
1098 num_online_cpus() : rsscap.num_recv_que;
1099 if (net_device->num_chn == 1)
1100 goto out;
1101
1102 net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
1103 NETVSC_PACKET_SIZE);
1104 if (!net_device->sub_cb_buf) {
1105 net_device->num_chn = 1;
1106 dev_info(&dev->device, "No memory for subchannels.\n");
1107 goto out;
1108 }
1109
1110 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1111
1112 init_packet = &net_device->channel_init_pkt;
1113 memset(init_packet, 0, sizeof(struct nvsp_message));
1114 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1115 init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1116 init_packet->msg.v5_msg.subchn_req.num_subchannels =
1117 net_device->num_chn - 1;
1118 ret = vmbus_sendpacket(dev->channel, init_packet,
1119 sizeof(struct nvsp_message),
1120 (unsigned long)init_packet,
1121 VM_PKT_DATA_INBAND,
1122 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1123 if (ret)
1124 goto out;
1125 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
1126 if (t == 0) {
1127 ret = -ETIMEDOUT;
1128 goto out;
1129 }
1130 if (init_packet->msg.v5_msg.subchn_comp.status !=
1131 NVSP_STAT_SUCCESS) {
1132 ret = -ENODEV;
1133 goto out;
1134 }
1135 net_device->num_chn = 1 +
1136 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1137
1138 vmbus_are_subchannels_present(dev->channel);
1139
1140 ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
1141
1142out:
1143 if (ret)
1144 net_device->num_chn = 1;
1145 return 0; /* return 0 because primary channel can be used alone */
965 1146
966err_dev_remv: 1147err_dev_remv:
967 rndis_filter_device_remove(dev); 1148 rndis_filter_device_remove(dev);
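
The tail of rndis_filter_device_add() reads as a best-effort ladder: once basic device setup has succeeded, every later failure drops back to num_chn = 1 and the function still returns 0, because the primary channel alone is a working configuration. A compilable model of that fallback shape (the three helpers are stubs, not kernel functions):

#include <stdio.h>

static int query_rss_caps(unsigned int *hw_queues) { *hw_queues = 8; return 0; }
static int alloc_subchannels(unsigned int n) { (void)n; return 0; }
static int set_rss(unsigned int n) { (void)n; return 0; }

/* Models the vRSS tail: clamp to min(cpus, hw queues), fall back to
 * a single channel on any error, and always report success. */
static unsigned int setup_vrss(unsigned int ncpus)
{
	unsigned int hw, n = 1;
	int ret;

	ret = query_rss_caps(&hw);
	if (ret || hw < 2)
		goto out;

	n = ncpus < hw ? ncpus : hw;
	if (n == 1)
		goto out;

	ret = alloc_subchannels(n - 1);
	if (ret)
		goto out;

	ret = set_rss(n);
out:
	if (ret)
		n = 1;		/* primary channel can always be used alone */
	return n;
}

int main(void)
{
	printf("num_chn = %u\n", setup_vrss(4));
	return 0;
}
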
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index e36f194673a4..4517b149ed07 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/irq.h>
26#include <linux/gpio.h> 27#include <linux/gpio.h>
27#include <linux/delay.h> 28#include <linux/delay.h>
28#include <linux/mutex.h> 29#include <linux/mutex.h>
@@ -692,10 +693,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
692 if (rc < 0) 693 if (rc < 0)
693 goto err_rx; 694 goto err_rx;
694 695
695 rc = at86rf230_start(dev); 696 return at86rf230_start(dev);
696
697 return rc;
698
699err_rx: 697err_rx:
700 at86rf230_start(dev); 698 at86rf230_start(dev);
701err: 699err:
@@ -963,33 +961,24 @@ static irqreturn_t at86rf230_isr_level(int irq, void *data)
963 return at86rf230_isr(irq, data); 961 return at86rf230_isr(irq, data);
964} 962}
965 963
966static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
967{
968 return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
969}
970
971static int at86rf230_hw_init(struct at86rf230_local *lp) 964static int at86rf230_hw_init(struct at86rf230_local *lp)
972{ 965{
973 struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data; 966 int rc, irq_pol, irq_type;
974 int rc, irq_pol; 967 u8 dvdd;
975 u8 status;
976 u8 csma_seed[2]; 968 u8 csma_seed[2];
977 969
978 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
979 if (rc)
980 return rc;
981
982 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF); 970 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
983 if (rc) 971 if (rc)
984 return rc; 972 return rc;
985 973
974 irq_type = irq_get_trigger_type(lp->spi->irq);
986 /* configure irq polarity, defaults to high active */ 975 /* configure irq polarity, defaults to high active */
987 if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) 976 if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
988 irq_pol = IRQ_ACTIVE_LOW; 977 irq_pol = IRQ_ACTIVE_LOW;
989 else 978 else
990 irq_pol = IRQ_ACTIVE_HIGH; 979 irq_pol = IRQ_ACTIVE_HIGH;
991 980
992 rc = at86rf230_irq_polarity(lp, irq_pol); 981 rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
993 if (rc) 982 if (rc)
994 return rc; 983 return rc;
995 984
@@ -1017,10 +1006,10 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
1017 /* Wait the next SLEEP cycle */ 1006 /* Wait the next SLEEP cycle */
1018 msleep(100); 1007 msleep(100);
1019 1008
1020 rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status); 1009 rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
1021 if (rc) 1010 if (rc)
1022 return rc; 1011 return rc;
1023 if (!status) { 1012 if (!dvdd) {
1024 dev_err(&lp->spi->dev, "DVDD error\n"); 1013 dev_err(&lp->spi->dev, "DVDD error\n");
1025 return -EINVAL; 1014 return -EINVAL;
1026 } 1015 }
@@ -1032,7 +1021,6 @@ static struct at86rf230_platform_data *
1032at86rf230_get_pdata(struct spi_device *spi) 1021at86rf230_get_pdata(struct spi_device *spi)
1033{ 1022{
1034 struct at86rf230_platform_data *pdata; 1023 struct at86rf230_platform_data *pdata;
1035 const char *irq_type;
1036 1024
1037 if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) 1025 if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
1038 return spi->dev.platform_data; 1026 return spi->dev.platform_data;
@@ -1044,19 +1032,6 @@ at86rf230_get_pdata(struct spi_device *spi)
1044 pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); 1032 pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
1045 pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); 1033 pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
1046 1034
1047 pdata->irq_type = IRQF_TRIGGER_RISING;
1048 of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
1049 if (!strcmp(irq_type, "level-high"))
1050 pdata->irq_type = IRQF_TRIGGER_HIGH;
1051 else if (!strcmp(irq_type, "level-low"))
1052 pdata->irq_type = IRQF_TRIGGER_LOW;
1053 else if (!strcmp(irq_type, "edge-rising"))
1054 pdata->irq_type = IRQF_TRIGGER_RISING;
1055 else if (!strcmp(irq_type, "edge-falling"))
1056 pdata->irq_type = IRQF_TRIGGER_FALLING;
1057 else
1058 dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
1059
1060 spi->dev.platform_data = pdata; 1035 spi->dev.platform_data = pdata;
1061done: 1036done:
1062 return pdata; 1037 return pdata;
@@ -1071,7 +1046,7 @@ static int at86rf230_probe(struct spi_device *spi)
1071 u8 part = 0, version = 0, status; 1046 u8 part = 0, version = 0, status;
1072 irq_handler_t irq_handler; 1047 irq_handler_t irq_handler;
1073 work_func_t irq_worker; 1048 work_func_t irq_worker;
1074 int rc; 1049 int rc, irq_type;
1075 const char *chip; 1050 const char *chip;
1076 struct ieee802154_ops *ops = NULL; 1051 struct ieee802154_ops *ops = NULL;
1077 1052
@@ -1087,27 +1062,17 @@ static int at86rf230_probe(struct spi_device *spi)
1087 } 1062 }
1088 1063
1089 if (gpio_is_valid(pdata->rstn)) { 1064 if (gpio_is_valid(pdata->rstn)) {
1090 rc = gpio_request(pdata->rstn, "rstn"); 1065 rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
1066 GPIOF_OUT_INIT_HIGH, "rstn");
1091 if (rc) 1067 if (rc)
1092 return rc; 1068 return rc;
1093 } 1069 }
1094 1070
1095 if (gpio_is_valid(pdata->slp_tr)) { 1071 if (gpio_is_valid(pdata->slp_tr)) {
1096 rc = gpio_request(pdata->slp_tr, "slp_tr"); 1072 rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
1097 if (rc) 1073 GPIOF_OUT_INIT_LOW, "slp_tr");
1098 goto err_slp_tr;
1099 }
1100
1101 if (gpio_is_valid(pdata->rstn)) {
1102 rc = gpio_direction_output(pdata->rstn, 1);
1103 if (rc)
1104 goto err_gpio_dir;
1105 }
1106
1107 if (gpio_is_valid(pdata->slp_tr)) {
1108 rc = gpio_direction_output(pdata->slp_tr, 0);
1109 if (rc) 1074 if (rc)
1110 goto err_gpio_dir; 1075 return rc;
1111 } 1076 }
1112 1077
1113 /* Reset */ 1078 /* Reset */
@@ -1121,13 +1086,12 @@ static int at86rf230_probe(struct spi_device *spi)
1121 1086
1122 rc = __at86rf230_detect_device(spi, &man_id, &part, &version); 1087 rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
1123 if (rc < 0) 1088 if (rc < 0)
1124 goto err_gpio_dir; 1089 return rc;
1125 1090
1126 if (man_id != 0x001f) { 1091 if (man_id != 0x001f) {
1127 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n", 1092 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
1128 man_id >> 8, man_id & 0xFF); 1093 man_id >> 8, man_id & 0xFF);
1129 rc = -EINVAL; 1094 return -EINVAL;
1130 goto err_gpio_dir;
1131 } 1095 }
1132 1096
1133 switch (part) { 1097 switch (part) {
@@ -1154,16 +1118,12 @@ static int at86rf230_probe(struct spi_device *spi)
1154 } 1118 }
1155 1119
1156 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version); 1120 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
1157 if (!ops) { 1121 if (!ops)
1158 rc = -ENOTSUPP; 1122 return -ENOTSUPP;
1159 goto err_gpio_dir;
1160 }
1161 1123
1162 dev = ieee802154_alloc_device(sizeof(*lp), ops); 1124 dev = ieee802154_alloc_device(sizeof(*lp), ops);
1163 if (!dev) { 1125 if (!dev)
1164 rc = -ENOMEM; 1126 return -ENOMEM;
1165 goto err_gpio_dir;
1166 }
1167 1127
1168 lp = dev->priv; 1128 lp = dev->priv;
1169 lp->dev = dev; 1129 lp->dev = dev;
@@ -1176,7 +1136,8 @@ static int at86rf230_probe(struct spi_device *spi)
1176 dev->extra_tx_headroom = 0; 1136 dev->extra_tx_headroom = 0;
1177 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK; 1137 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
1178 1138
1179 if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { 1139 irq_type = irq_get_trigger_type(spi->irq);
1140 if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
1180 irq_worker = at86rf230_irqwork; 1141 irq_worker = at86rf230_irqwork;
1181 irq_handler = at86rf230_isr; 1142 irq_handler = at86rf230_isr;
1182 } else { 1143 } else {
@@ -1202,75 +1163,65 @@ static int at86rf230_probe(struct spi_device *spi)
1202 if (rc) 1163 if (rc)
1203 goto err_hw_init; 1164 goto err_hw_init;
1204 1165
1205 rc = request_irq(spi->irq, irq_handler, 1166 /* Read irq status register to reset irq line */
1206 IRQF_SHARED | pdata->irq_type, 1167 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
1207 dev_name(&spi->dev), lp);
1208 if (rc) 1168 if (rc)
1209 goto err_hw_init; 1169 goto err_hw_init;
1210 1170
1211 /* Read irq status register to reset irq line */ 1171 rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
1212 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status); 1172 dev_name(&spi->dev), lp);
1213 if (rc) 1173 if (rc)
1214 goto err_irq; 1174 goto err_hw_init;
1215 1175
1216 rc = ieee802154_register_device(lp->dev); 1176 rc = ieee802154_register_device(lp->dev);
1217 if (rc) 1177 if (rc)
1218 goto err_irq; 1178 goto err_hw_init;
1219 1179
1220 return rc; 1180 return rc;
1221 1181
1222err_irq:
1223 free_irq(spi->irq, lp);
1224err_hw_init: 1182err_hw_init:
1225 flush_work(&lp->irqwork); 1183 flush_work(&lp->irqwork);
1226 spi_set_drvdata(spi, NULL);
1227 mutex_destroy(&lp->bmux); 1184 mutex_destroy(&lp->bmux);
1228 ieee802154_free_device(lp->dev); 1185 ieee802154_free_device(lp->dev);
1229 1186
1230err_gpio_dir:
1231 if (gpio_is_valid(pdata->slp_tr))
1232 gpio_free(pdata->slp_tr);
1233err_slp_tr:
1234 if (gpio_is_valid(pdata->rstn))
1235 gpio_free(pdata->rstn);
1236 return rc; 1187 return rc;
1237} 1188}
1238 1189
1239static int at86rf230_remove(struct spi_device *spi) 1190static int at86rf230_remove(struct spi_device *spi)
1240{ 1191{
1241 struct at86rf230_local *lp = spi_get_drvdata(spi); 1192 struct at86rf230_local *lp = spi_get_drvdata(spi);
1242 struct at86rf230_platform_data *pdata = spi->dev.platform_data;
1243 1193
1244 /* mask all at86rf230 irq's */ 1194 /* mask all at86rf230 irq's */
1245 at86rf230_write_subreg(lp, SR_IRQ_MASK, 0); 1195 at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
1246 ieee802154_unregister_device(lp->dev); 1196 ieee802154_unregister_device(lp->dev);
1247
1248 free_irq(spi->irq, lp);
1249 flush_work(&lp->irqwork); 1197 flush_work(&lp->irqwork);
1250
1251 if (gpio_is_valid(pdata->slp_tr))
1252 gpio_free(pdata->slp_tr);
1253 if (gpio_is_valid(pdata->rstn))
1254 gpio_free(pdata->rstn);
1255
1256 mutex_destroy(&lp->bmux); 1198 mutex_destroy(&lp->bmux);
1257 ieee802154_free_device(lp->dev); 1199 ieee802154_free_device(lp->dev);
1258
1259 dev_dbg(&spi->dev, "unregistered at86rf230\n"); 1200 dev_dbg(&spi->dev, "unregistered at86rf230\n");
1201
1260 return 0; 1202 return 0;
1261} 1203}
1262 1204
1263#if IS_ENABLED(CONFIG_OF) 1205static const struct of_device_id at86rf230_of_match[] = {
1264static struct of_device_id at86rf230_of_match[] = {
1265 { .compatible = "atmel,at86rf230", }, 1206 { .compatible = "atmel,at86rf230", },
1266 { .compatible = "atmel,at86rf231", }, 1207 { .compatible = "atmel,at86rf231", },
1267 { .compatible = "atmel,at86rf233", }, 1208 { .compatible = "atmel,at86rf233", },
1268 { .compatible = "atmel,at86rf212", }, 1209 { .compatible = "atmel,at86rf212", },
1269 { }, 1210 { },
1270}; 1211};
1271#endif 1212MODULE_DEVICE_TABLE(of, at86rf230_of_match);
1213
1214static const struct spi_device_id at86rf230_device_id[] = {
1215 { .name = "at86rf230", },
1216 { .name = "at86rf231", },
1217 { .name = "at86rf233", },
1218 { .name = "at86rf212", },
1219 { },
1220};
1221MODULE_DEVICE_TABLE(spi, at86rf230_device_id);
1272 1222
1273static struct spi_driver at86rf230_driver = { 1223static struct spi_driver at86rf230_driver = {
1224 .id_table = at86rf230_device_id,
1274 .driver = { 1225 .driver = {
1275 .of_match_table = of_match_ptr(at86rf230_of_match), 1226 .of_match_table = of_match_ptr(at86rf230_of_match),
1276 .name = "at86rf230", 1227 .name = "at86rf230",
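
The at86rf230 changes above are a textbook devm migration: memory, GPIOs and the IRQ are now acquired through devm_kzalloc(), devm_gpio_request_one() and devm_request_irq(), so they are released automatically on probe failure or unbind, which is why the err_gpio_dir/err_slp_tr unwind labels and the gpio_free()/free_irq() calls in remove() disappear. A toy userspace model of the devres idea (cleanups attached to the device run automatically, in reverse order, when the device goes away):

#include <stdio.h>

#define MAX_RES 8

struct device {
	void (*release[MAX_RES])(void);
	int nres;
};

/* ~devm_*: register a cleanup against the device's lifetime */
static void devm_add(struct device *dev, void (*release)(void))
{
	dev->release[dev->nres++] = release;
}

static void device_unbind(struct device *dev)
{
	while (dev->nres)
		dev->release[--dev->nres]();	/* reverse order, as devres does */
}

static void release_gpio(void) { puts("gpio_free(rstn)"); }
static void release_irq(void) { puts("free_irq(spi->irq)"); }

static int probe(struct device *dev)
{
	devm_add(dev, release_gpio);	/* ~devm_gpio_request_one() */
	devm_add(dev, release_irq);	/* ~devm_request_irq() */
	return -1;			/* pretend a later probe step failed */
}

int main(void)
{
	struct device dev = { .nres = 0 };

	if (probe(&dev))
		device_unbind(&dev);	/* the driver core unwinds for us */
	return 0;
}
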
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index b8d22173925d..27d83207d24c 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -26,6 +26,7 @@
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/device.h>
29#include <linux/spinlock.h> 30#include <linux/spinlock.h>
30#include <net/mac802154.h> 31#include <net/mac802154.h>
31#include <net/wpan-phy.h> 32#include <net/wpan-phy.h>
@@ -228,7 +229,8 @@ static int fakelb_probe(struct platform_device *pdev)
228 int err = -ENOMEM; 229 int err = -ENOMEM;
229 int i; 230 int i;
230 231
231 priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL); 232 priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
233 GFP_KERNEL);
232 if (!priv) 234 if (!priv)
233 goto err_alloc; 235 goto err_alloc;
234 236
@@ -248,7 +250,6 @@ static int fakelb_probe(struct platform_device *pdev)
248err_slave: 250err_slave:
249 list_for_each_entry(dp, &priv->list, list) 251 list_for_each_entry(dp, &priv->list, list)
250 fakelb_del(dp); 252 fakelb_del(dp);
251 kfree(priv);
252err_alloc: 253err_alloc:
253 return err; 254 return err;
254} 255}
@@ -260,7 +261,6 @@ static int fakelb_remove(struct platform_device *pdev)
260 261
261 list_for_each_entry_safe(dp, temp, &priv->list, list) 262 list_for_each_entry_safe(dp, temp, &priv->list, list)
262 fakelb_del(dp); 263 fakelb_del(dp);
263 kfree(priv);
264 264
265 return 0; 265 return 0;
266} 266}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 78a6552ed707..4048062011ba 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -618,12 +618,12 @@ static int mrf24j40_probe(struct spi_device *spi)
618 618
619 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq); 619 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
620 620
621 devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL); 621 devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL);
622 if (!devrec) 622 if (!devrec)
623 goto err_devrec; 623 goto err_ret;
624 devrec->buf = kzalloc(3, GFP_KERNEL); 624 devrec->buf = devm_kzalloc(&spi->dev, 3, GFP_KERNEL);
625 if (!devrec->buf) 625 if (!devrec->buf)
626 goto err_buf; 626 goto err_ret;
627 627
628 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */ 628 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
629 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) 629 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
@@ -638,7 +638,7 @@ static int mrf24j40_probe(struct spi_device *spi)
638 638
639 devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops); 639 devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
640 if (!devrec->dev) 640 if (!devrec->dev)
641 goto err_alloc_dev; 641 goto err_ret;
642 642
643 devrec->dev->priv = devrec; 643 devrec->dev->priv = devrec;
644 devrec->dev->parent = &devrec->spi->dev; 644 devrec->dev->parent = &devrec->spi->dev;
@@ -676,12 +676,13 @@ static int mrf24j40_probe(struct spi_device *spi)
676 val &= ~0x3; /* Clear RX mode (normal) */ 676 val &= ~0x3; /* Clear RX mode (normal) */
677 write_short_reg(devrec, REG_RXMCR, val); 677 write_short_reg(devrec, REG_RXMCR, val);
678 678
679 ret = request_threaded_irq(spi->irq, 679 ret = devm_request_threaded_irq(&spi->dev,
680 NULL, 680 spi->irq,
681 mrf24j40_isr, 681 NULL,
682 IRQF_TRIGGER_LOW|IRQF_ONESHOT, 682 mrf24j40_isr,
683 dev_name(&spi->dev), 683 IRQF_TRIGGER_LOW|IRQF_ONESHOT,
684 devrec); 684 dev_name(&spi->dev),
685 devrec);
685 686
686 if (ret) { 687 if (ret) {
687 dev_err(printdev(devrec), "Unable to get IRQ"); 688 dev_err(printdev(devrec), "Unable to get IRQ");
@@ -695,11 +696,7 @@ err_read_reg:
695 ieee802154_unregister_device(devrec->dev); 696 ieee802154_unregister_device(devrec->dev);
696err_register_device: 697err_register_device:
697 ieee802154_free_device(devrec->dev); 698 ieee802154_free_device(devrec->dev);
698err_alloc_dev: 699err_ret:
699 kfree(devrec->buf);
700err_buf:
701 kfree(devrec);
702err_devrec:
703 return ret; 700 return ret;
704} 701}
705 702
@@ -709,15 +706,11 @@ static int mrf24j40_remove(struct spi_device *spi)
709 706
710 dev_dbg(printdev(devrec), "remove\n"); 707 dev_dbg(printdev(devrec), "remove\n");
711 708
712 free_irq(spi->irq, devrec);
713 ieee802154_unregister_device(devrec->dev); 709 ieee802154_unregister_device(devrec->dev);
714 ieee802154_free_device(devrec->dev); 710 ieee802154_free_device(devrec->dev);
715 /* TODO: Will ieee802154_free_device() wait until ->xmit() is 711 /* TODO: Will ieee802154_free_device() wait until ->xmit() is
716 * complete? */ 712 * complete? */
717 713
718 /* Clean up the SPI stuff. */
719 kfree(devrec->buf);
720 kfree(devrec);
721 return 0; 714 return 0;
722} 715}
723 716
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 3da44d5d9149..8d101d63abca 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -396,7 +396,8 @@ config MCS_FIR
396 396
397config SH_IRDA 397config SH_IRDA
398 tristate "SuperH IrDA driver" 398 tristate "SuperH IrDA driver"
399 depends on IRDA && ARCH_SHMOBILE 399 depends on IRDA
400 depends on ARCH_SHMOBILE || COMPILE_TEST
400 help 401 help
401 Say Y here if you want to enable SuperH IrDA devices. 402
402 403
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2900af091c2d..998bb89ede71 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -510,10 +510,8 @@ static void via_hw_init(struct via_ircc_cb *self)
510 */ 510 */
511static int via_ircc_read_dongle_id(int iobase) 511static int via_ircc_read_dongle_id(int iobase)
512{ 512{
513 int dongle_id = 9; /* Default to IBM */
514
515 IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n"); 513 IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
516 return dongle_id; 514 return 9; /* Default to IBM */
517} 515}
518 516
519/* 517/*
@@ -926,7 +924,6 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
926static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self) 924static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
927{ 925{
928 int iobase; 926 int iobase;
929 int ret = TRUE;
930 u8 Tx_status; 927 u8 Tx_status;
931 928
932 IRDA_DEBUG(3, "%s()\n", __func__); 929 IRDA_DEBUG(3, "%s()\n", __func__);
@@ -983,7 +980,7 @@ F01_E*/
983 // Tell the network layer, that we can accept more frames 980 // Tell the network layer, that we can accept more frames
984 netif_wake_queue(self->netdev); 981 netif_wake_queue(self->netdev);
985//F01 } 982//F01 }
986 return ret; 983 return TRUE;
987} 984}
988 985
989/* 986/*
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index e641bb240362..11dbdf36d9c1 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -62,10 +62,6 @@
62#include "w83977af.h" 62#include "w83977af.h"
63#include "w83977af_ir.h" 63#include "w83977af_ir.h"
64 64
65#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
66#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
67#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
68#endif
69#define CONFIG_USE_W977_PNP /* Currently needed */ 65#define CONFIG_USE_W977_PNP /* Currently needed */
70#define PIO_MAX_SPEED 115200 66#define PIO_MAX_SPEED 115200
71 67
@@ -332,7 +328,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
332 w977_write_reg(0x74, dma+1, efbase[i]); 328 w977_write_reg(0x74, dma+1, efbase[i]);
333#else 329#else
334 w977_write_reg(0x74, dma, efbase[i]); 330 w977_write_reg(0x74, dma, efbase[i]);
335#endif /*CONFIG_ARCH_NETWINDER */ 331#endif /* CONFIG_ARCH_NETWINDER */
336 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */ 332 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
337 333
338 /* Set append hardware CRC, enable IR bank selection */ 334 /* Set append hardware CRC, enable IR bank selection */
@@ -563,10 +559,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
563static void w83977af_dma_write(struct w83977af_ir *self, int iobase) 559static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
564{ 560{
565 __u8 set; 561 __u8 set;
566#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
567 unsigned long flags;
568 __u8 hcr;
569#endif
570 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len); 562 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
571 563
572 /* Save current set */ 564 /* Save current set */
@@ -579,30 +571,13 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
579 /* Choose transmit DMA channel */ 571 /* Choose transmit DMA channel */
580 switch_bank(iobase, SET2); 572 switch_bank(iobase, SET2);
581 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1); 573 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
582#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
583 spin_lock_irqsave(&self->lock, flags);
584
585 disable_dma(self->io.dma);
586 clear_dma_ff(self->io.dma);
587 set_dma_mode(self->io.dma, DMA_MODE_READ);
588 set_dma_addr(self->io.dma, self->tx_buff_dma);
589 set_dma_count(self->io.dma, self->tx_buff.len);
590#else
591 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, 574 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
592 DMA_MODE_WRITE); 575 DMA_MODE_WRITE);
593#endif
594 self->io.direction = IO_XMIT; 576 self->io.direction = IO_XMIT;
595 577
596 /* Enable DMA */ 578 /* Enable DMA */
597 switch_bank(iobase, SET0); 579 switch_bank(iobase, SET0);
598#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
599 hcr = inb(iobase+HCR);
600 outb(hcr | HCR_EN_DMA, iobase+HCR);
601 enable_dma(self->io.dma);
602 spin_unlock_irqrestore(&self->lock, flags);
603#else
604 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR); 580 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
605#endif
606 581
607 /* Restore set register */ 582 /* Restore set register */
608 outb(set, iobase+SSR); 583 outb(set, iobase+SSR);
@@ -711,7 +686,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
711{ 686{
712 int iobase; 687 int iobase;
713 __u8 set; 688 __u8 set;
714#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS 689#ifdef CONFIG_ARCH_NETWINDER
715 unsigned long flags; 690 unsigned long flags;
716 __u8 hcr; 691 __u8 hcr;
717#endif 692#endif
@@ -736,7 +711,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
736 self->io.direction = IO_RECV; 711 self->io.direction = IO_RECV;
737 self->rx_buff.data = self->rx_buff.head; 712 self->rx_buff.data = self->rx_buff.head;
738 713
739#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS 714#ifdef CONFIG_ARCH_NETWINDER
740 spin_lock_irqsave(&self->lock, flags); 715 spin_lock_irqsave(&self->lock, flags);
741 716
742 disable_dma(self->io.dma); 717 disable_dma(self->io.dma);
@@ -759,7 +734,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
759 734
760 /* Enable DMA */ 735 /* Enable DMA */
761 switch_bank(iobase, SET0); 736 switch_bank(iobase, SET0);
762#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS 737#ifdef CONFIG_ARCH_NETWINDER
763 hcr = inb(iobase+HCR); 738 hcr = inb(iobase+HCR);
764 outb(hcr | HCR_EN_DMA, iobase+HCR); 739 outb(hcr | HCR_EN_DMA, iobase+HCR);
765 enable_dma(self->io.dma); 740 enable_dma(self->io.dma);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d53e299ae1d9..958df383068a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -30,8 +30,10 @@
30#include <linux/if_link.h> 30#include <linux/if_link.h>
31#include <linux/if_macvlan.h> 31#include <linux/if_macvlan.h>
32#include <linux/hash.h> 32#include <linux/hash.h>
33#include <linux/workqueue.h>
33#include <net/rtnetlink.h> 34#include <net/rtnetlink.h>
34#include <net/xfrm.h> 35#include <net/xfrm.h>
36#include <linux/netpoll.h>
35 37
36#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) 38#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
37 39
@@ -40,10 +42,19 @@ struct macvlan_port {
40 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 42 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
41 struct list_head vlans; 43 struct list_head vlans;
42 struct rcu_head rcu; 44 struct rcu_head rcu;
45 struct sk_buff_head bc_queue;
46 struct work_struct bc_work;
43 bool passthru; 47 bool passthru;
44 int count;
45}; 48};
46 49
50#define MACVLAN_PORT_IS_EMPTY(port) list_empty(&port->vlans)
51
52struct macvlan_skb_cb {
53 const struct macvlan_dev *src;
54};
55
56#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
57
47static void macvlan_port_destroy(struct net_device *dev); 58static void macvlan_port_destroy(struct net_device *dev);
48 59
49static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev) 60static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@ -120,7 +131,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
120 struct net_device *dev = vlan->dev; 131 struct net_device *dev = vlan->dev;
121 132
122 if (local) 133 if (local)
123 return dev_forward_skb(dev, skb); 134 return __dev_forward_skb(dev, skb);
124 135
125 skb->dev = dev; 136 skb->dev = dev;
126 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) 137 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +139,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
128 else 139 else
129 skb->pkt_type = PACKET_MULTICAST; 140 skb->pkt_type = PACKET_MULTICAST;
130 141
131 return netif_rx(skb); 142 return 0;
132} 143}
133 144
134static u32 macvlan_hash_mix(const struct macvlan_dev *vlan) 145static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -175,32 +186,32 @@ static void macvlan_broadcast(struct sk_buff *skb,
175 if (likely(nskb)) 186 if (likely(nskb))
176 err = macvlan_broadcast_one( 187 err = macvlan_broadcast_one(
177 nskb, vlan, eth, 188 nskb, vlan, eth,
178 mode == MACVLAN_MODE_BRIDGE); 189 mode == MACVLAN_MODE_BRIDGE) ?:
190 netif_rx_ni(nskb);
179 macvlan_count_rx(vlan, skb->len + ETH_HLEN, 191 macvlan_count_rx(vlan, skb->len + ETH_HLEN,
180 err == NET_RX_SUCCESS, 1); 192 err == NET_RX_SUCCESS, 1);
181 } 193 }
182 } 194 }
183} 195}
184 196
185/* called under rcu_read_lock() from netif_receive_skb */ 197static void macvlan_process_broadcast(struct work_struct *w)
186static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
187{ 198{
188 struct macvlan_port *port; 199 struct macvlan_port *port = container_of(w, struct macvlan_port,
189 struct sk_buff *skb = *pskb; 200 bc_work);
190 const struct ethhdr *eth = eth_hdr(skb); 201 struct sk_buff *skb;
191 const struct macvlan_dev *vlan; 202 struct sk_buff_head list;
192 const struct macvlan_dev *src; 203
193 struct net_device *dev; 204 skb_queue_head_init(&list);
194 unsigned int len = 0; 205
195 int ret = NET_RX_DROP; 206 spin_lock_bh(&port->bc_queue.lock);
207 skb_queue_splice_tail_init(&port->bc_queue, &list);
208 spin_unlock_bh(&port->bc_queue.lock);
209
210 while ((skb = __skb_dequeue(&list))) {
211 const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
212
213 rcu_read_lock();
196 214
197 port = macvlan_port_get_rcu(skb->dev);
198 if (is_multicast_ether_addr(eth->h_dest)) {
199 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
200 if (!skb)
201 return RX_HANDLER_CONSUMED;
202 eth = eth_hdr(skb);
203 src = macvlan_hash_lookup(port, eth->h_source);
204 if (!src) 215 if (!src)
205 /* frame comes from an external address */ 216 /* frame comes from an external address */
206 macvlan_broadcast(skb, port, NULL, 217 macvlan_broadcast(skb, port, NULL,
@@ -213,20 +224,80 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
213 macvlan_broadcast(skb, port, src->dev, 224 macvlan_broadcast(skb, port, src->dev,
214 MACVLAN_MODE_VEPA | 225 MACVLAN_MODE_VEPA |
215 MACVLAN_MODE_BRIDGE); 226 MACVLAN_MODE_BRIDGE);
216 else if (src->mode == MACVLAN_MODE_BRIDGE) 227 else
217 /* 228 /*
218 * flood only to VEPA ports, bridge ports 229 * flood only to VEPA ports, bridge ports
219 * already saw the frame on the way out. 230 * already saw the frame on the way out.
220 */ 231 */
221 macvlan_broadcast(skb, port, src->dev, 232 macvlan_broadcast(skb, port, src->dev,
222 MACVLAN_MODE_VEPA); 233 MACVLAN_MODE_VEPA);
223 else { 234
235 rcu_read_unlock();
236
237 kfree_skb(skb);
238 }
239}
240
241static void macvlan_broadcast_enqueue(struct macvlan_port *port,
242 struct sk_buff *skb)
243{
244 struct sk_buff *nskb;
245 int err = -ENOMEM;
246
247 nskb = skb_clone(skb, GFP_ATOMIC);
248 if (!nskb)
249 goto err;
250
251 spin_lock(&port->bc_queue.lock);
252 if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
253 __skb_queue_tail(&port->bc_queue, nskb);
254 err = 0;
255 }
256 spin_unlock(&port->bc_queue.lock);
257
258 if (err)
259 goto free_nskb;
260
261 schedule_work(&port->bc_work);
262 return;
263
264free_nskb:
265 kfree_skb(nskb);
266err:
267 atomic_long_inc(&skb->dev->rx_dropped);
268}
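
The macvlan rework moves multicast flooding out of the receive softirq: frames are cloned onto the per-port bc_queue, capped at the lower device's tx_queue_len, and a work item later drains the queue and floods under rcu_read_lock(); an overflow bumps rx_dropped instead of stalling receive. A single-threaded userspace model of the bounded enqueue/drain (QCAP and the printf are stand-ins for the queue cap and the actual flood):

#include <stdio.h>

#define QCAP 4			/* stands in for dev->tx_queue_len */

static int queue[QCAP];
static int qlen, dropped;

/* models macvlan_broadcast_enqueue(): drop (and count) when full */
static void bc_enqueue(int skb)
{
	if (qlen < QCAP)
		queue[qlen++] = skb;
	else
		dropped++;	/* rx_dropped++ in the driver */
}

/* models macvlan_process_broadcast(): splice the list out and flood */
static void bc_work(void)
{
	for (int i = 0; i < qlen; i++)
		printf("flood skb %d\n", queue[i]);
	qlen = 0;
}

int main(void)
{
	for (int skb = 0; skb < 6; skb++)
		bc_enqueue(skb);
	bc_work();
	printf("dropped %d\n", dropped);
	return 0;
}
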
269
270/* called under rcu_read_lock() from netif_receive_skb */
271static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
272{
273 struct macvlan_port *port;
274 struct sk_buff *skb = *pskb;
275 const struct ethhdr *eth = eth_hdr(skb);
276 const struct macvlan_dev *vlan;
277 const struct macvlan_dev *src;
278 struct net_device *dev;
279 unsigned int len = 0;
280 int ret = NET_RX_DROP;
281
282 port = macvlan_port_get_rcu(skb->dev);
283 if (is_multicast_ether_addr(eth->h_dest)) {
284 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
285 if (!skb)
286 return RX_HANDLER_CONSUMED;
287 eth = eth_hdr(skb);
288 src = macvlan_hash_lookup(port, eth->h_source);
289 if (src && src->mode != MACVLAN_MODE_VEPA &&
290 src->mode != MACVLAN_MODE_BRIDGE) {
224 /* forward to original port. */ 291 /* forward to original port. */
225 vlan = src; 292 vlan = src;
226 ret = macvlan_broadcast_one(skb, vlan, eth, 0); 293 ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
294 netif_rx(skb);
227 goto out; 295 goto out;
228 } 296 }
229 297
298 MACVLAN_SKB_CB(skb)->src = src;
299 macvlan_broadcast_enqueue(port, skb);
300
230 return RX_HANDLER_PASS; 301 return RX_HANDLER_PASS;
231 } 302 }
232 303
@@ -287,12 +358,26 @@ xmit_world:
287 return dev_queue_xmit(skb); 358 return dev_queue_xmit(skb);
288} 359}
289 360
361static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
362{
363#ifdef CONFIG_NET_POLL_CONTROLLER
364 if (vlan->netpoll)
365 netpoll_send_skb(vlan->netpoll, skb);
366#else
367 BUG();
368#endif
369 return NETDEV_TX_OK;
370}
371
290static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 372static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
291 struct net_device *dev) 373 struct net_device *dev)
292{ 374{
293 unsigned int len = skb->len; 375 unsigned int len = skb->len;
294 int ret; 376 int ret;
295 const struct macvlan_dev *vlan = netdev_priv(dev); 377 struct macvlan_dev *vlan = netdev_priv(dev);
378
379 if (unlikely(netpoll_tx_running(dev)))
380 return macvlan_netpoll_send_skb(vlan, skb);
296 381
297 if (vlan->fwd_priv) { 382 if (vlan->fwd_priv) {
298 skb->dev = vlan->lowerdev; 383 skb->dev = vlan->lowerdev;
@@ -424,35 +509,49 @@ hash_del:
424 return 0; 509 return 0;
425} 510}
426 511
427static int macvlan_set_mac_address(struct net_device *dev, void *p) 512static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
428{ 513{
429 struct macvlan_dev *vlan = netdev_priv(dev); 514 struct macvlan_dev *vlan = netdev_priv(dev);
430 struct net_device *lowerdev = vlan->lowerdev; 515 struct net_device *lowerdev = vlan->lowerdev;
431 struct sockaddr *addr = p;
432 int err; 516 int err;
433 517
434 if (!is_valid_ether_addr(addr->sa_data))
435 return -EADDRNOTAVAIL;
436
437 if (!(dev->flags & IFF_UP)) { 518 if (!(dev->flags & IFF_UP)) {
438 /* Just copy in the new address */ 519 /* Just copy in the new address */
439 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 520 ether_addr_copy(dev->dev_addr, addr);
440 } else { 521 } else {
441 /* Rehash and update the device filters */ 522 /* Rehash and update the device filters */
442 if (macvlan_addr_busy(vlan->port, addr->sa_data)) 523 if (macvlan_addr_busy(vlan->port, addr))
443 return -EBUSY; 524 return -EBUSY;
444 525
445 err = dev_uc_add(lowerdev, addr->sa_data); 526 if (!vlan->port->passthru) {
446 if (err) 527 err = dev_uc_add(lowerdev, addr);
447 return err; 528 if (err)
529 return err;
448 530
449 dev_uc_del(lowerdev, dev->dev_addr); 531 dev_uc_del(lowerdev, dev->dev_addr);
532 }
450 533
451 macvlan_hash_change_addr(vlan, addr->sa_data); 534 macvlan_hash_change_addr(vlan, addr);
452 } 535 }
453 return 0; 536 return 0;
454} 537}
455 538
539static int macvlan_set_mac_address(struct net_device *dev, void *p)
540{
541 struct macvlan_dev *vlan = netdev_priv(dev);
542 struct sockaddr *addr = p;
543
544 if (!is_valid_ether_addr(addr->sa_data))
545 return -EADDRNOTAVAIL;
546
547 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
548 dev_set_mac_address(vlan->lowerdev, addr);
549 return 0;
550 }
551
552 return macvlan_sync_address(dev, addr->sa_data);
553}
554
456static void macvlan_change_rx_flags(struct net_device *dev, int change) 555static void macvlan_change_rx_flags(struct net_device *dev, int change)
457{ 556{
458 struct macvlan_dev *vlan = netdev_priv(dev); 557 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,8 +666,7 @@ static void macvlan_uninit(struct net_device *dev)
567 666
568 free_percpu(vlan->pcpu_stats); 667 free_percpu(vlan->pcpu_stats);
569 668
570 port->count -= 1; 669 if (MACVLAN_PORT_IS_EMPTY(port))
571 if (!port->count)
572 macvlan_port_destroy(port->dev); 670 macvlan_port_destroy(port->dev);
573} 671}
574 672
@@ -705,6 +803,50 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
705 return features; 803 return features;
706} 804}
707 805
806#ifdef CONFIG_NET_POLL_CONTROLLER
807static void macvlan_dev_poll_controller(struct net_device *dev)
808{
809 return;
810}
811
812static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
813{
814 struct macvlan_dev *vlan = netdev_priv(dev);
815 struct net_device *real_dev = vlan->lowerdev;
816 struct netpoll *netpoll;
817 int err = 0;
818
819 netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
820 err = -ENOMEM;
821 if (!netpoll)
822 goto out;
823
824 err = __netpoll_setup(netpoll, real_dev);
825 if (err) {
826 kfree(netpoll);
827 goto out;
828 }
829
830 vlan->netpoll = netpoll;
831
832out:
833 return err;
834}
835
836static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
837{
838 struct macvlan_dev *vlan = netdev_priv(dev);
839 struct netpoll *netpoll = vlan->netpoll;
840
841 if (!netpoll)
842 return;
843
844 vlan->netpoll = NULL;
845
846 __netpoll_free_async(netpoll);
847}
848#endif /* CONFIG_NET_POLL_CONTROLLER */
849
708static const struct ethtool_ops macvlan_ethtool_ops = { 850static const struct ethtool_ops macvlan_ethtool_ops = {
709 .get_link = ethtool_op_get_link, 851 .get_link = ethtool_op_get_link,
710 .get_settings = macvlan_ethtool_get_settings, 852 .get_settings = macvlan_ethtool_get_settings,
@@ -730,6 +872,11 @@ static const struct net_device_ops macvlan_netdev_ops = {
730 .ndo_fdb_del = macvlan_fdb_del, 872 .ndo_fdb_del = macvlan_fdb_del,
731 .ndo_fdb_dump = ndo_dflt_fdb_dump, 873 .ndo_fdb_dump = ndo_dflt_fdb_dump,
732 .ndo_get_lock_subclass = macvlan_get_nest_level, 874 .ndo_get_lock_subclass = macvlan_get_nest_level,
875#ifdef CONFIG_NET_POLL_CONTROLLER
876 .ndo_poll_controller = macvlan_dev_poll_controller,
877 .ndo_netpoll_setup = macvlan_dev_netpoll_setup,
878 .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
879#endif
733}; 880};
734 881
735void macvlan_common_setup(struct net_device *dev) 882void macvlan_common_setup(struct net_device *dev)
@@ -770,6 +917,9 @@ static int macvlan_port_create(struct net_device *dev)
770 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 917 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
771 INIT_HLIST_HEAD(&port->vlan_hash[i]); 918 INIT_HLIST_HEAD(&port->vlan_hash[i]);
772 919
920 skb_queue_head_init(&port->bc_queue);
921 INIT_WORK(&port->bc_work, macvlan_process_broadcast);
922
773 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); 923 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
774 if (err) 924 if (err)
775 kfree(port); 925 kfree(port);
@@ -782,6 +932,7 @@ static void macvlan_port_destroy(struct net_device *dev)
782{ 932{
783 struct macvlan_port *port = macvlan_port_get_rtnl(dev); 933 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
784 934
935 cancel_work_sync(&port->bc_work);
785 dev->priv_flags &= ~IFF_MACVLAN_PORT; 936 dev->priv_flags &= ~IFF_MACVLAN_PORT;
786 netdev_rx_handler_unregister(dev); 937 netdev_rx_handler_unregister(dev);
787 kfree_rcu(port, rcu); 938 kfree_rcu(port, rcu);
@@ -868,13 +1019,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
868 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1019 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
869 1020
870 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 1021 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
871 if (port->count) 1022 if (!MACVLAN_PORT_IS_EMPTY(port))
872 return -EINVAL; 1023 return -EINVAL;
873 port->passthru = true; 1024 port->passthru = true;
874 eth_hw_addr_inherit(dev, lowerdev); 1025 eth_hw_addr_inherit(dev, lowerdev);
875 } 1026 }
876 1027
877 port->count += 1;
878 err = register_netdevice(dev); 1028 err = register_netdevice(dev);
879 if (err < 0) 1029 if (err < 0)
880 goto destroy_port; 1030 goto destroy_port;
@@ -892,8 +1042,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
892unregister_netdev: 1042unregister_netdev:
893 unregister_netdevice(dev); 1043 unregister_netdevice(dev);
894destroy_port: 1044destroy_port:
895 port->count -= 1; 1045 if (MACVLAN_PORT_IS_EMPTY(port))
896 if (!port->count)
897 macvlan_port_destroy(lowerdev); 1046 macvlan_port_destroy(lowerdev);
898 1047
899 return err; 1048 return err;
@@ -1028,6 +1177,25 @@ static int macvlan_device_event(struct notifier_block *unused,
1028 netdev_update_features(vlan->dev); 1177 netdev_update_features(vlan->dev);
1029 } 1178 }
1030 break; 1179 break;
1180 case NETDEV_CHANGEMTU:
1181 list_for_each_entry(vlan, &port->vlans, list) {
1182 if (vlan->dev->mtu <= dev->mtu)
1183 continue;
1184 dev_set_mtu(vlan->dev, dev->mtu);
1185 }
1186 break;
1187 case NETDEV_CHANGEADDR:
1188 if (!port->passthru)
1189 return NOTIFY_DONE;
1190
1191 vlan = list_first_entry_or_null(&port->vlans,
1192 struct macvlan_dev,
1193 list);
1194
1195 if (macvlan_sync_address(vlan->dev, dev->dev_addr))
1196 return NOTIFY_BAD;
1197
1198 break;
1031 case NETDEV_UNREGISTER: 1199 case NETDEV_UNREGISTER:
1032 /* twiddle thumbs on netns device moves */ 1200 /* twiddle thumbs on netns device moves */
1033 if (dev->reg_state != NETREG_UNREGISTERING) 1201 if (dev->reg_state != NETREG_UNREGISTERING)
@@ -1036,11 +1204,17 @@ static int macvlan_device_event(struct notifier_block *unused,
1036 list_for_each_entry_safe(vlan, next, &port->vlans, list) 1204 list_for_each_entry_safe(vlan, next, &port->vlans, list)
1037 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); 1205 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
1038 unregister_netdevice_many(&list_kill); 1206 unregister_netdevice_many(&list_kill);
1039 list_del(&list_kill);
1040 break; 1207 break;
1041 case NETDEV_PRE_TYPE_CHANGE: 1208 case NETDEV_PRE_TYPE_CHANGE:
1042	/* Forbid underlying device to change its type. */	1209
1043 return NOTIFY_BAD; 1210 return NOTIFY_BAD;
1211
1212 case NETDEV_NOTIFY_PEERS:
1213 case NETDEV_BONDING_FAILOVER:
1214 case NETDEV_RESEND_IGMP:
1215 /* Propagate to all vlans */
1216 list_for_each_entry(vlan, &port->vlans, list)
1217 call_netdevice_notifiers(event, vlan->dev);
1044 } 1218 }
1045 return NOTIFY_DONE; 1219 return NOTIFY_DONE;
1046} 1220}
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 63aa9d9e34c5..5a7e6397440a 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -298,7 +298,6 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
298{ 298{
299 cmd->supported = SUPPORTED_Backplane; 299 cmd->supported = SUPPORTED_Backplane;
300 cmd->advertising = ADVERTISED_Backplane; 300 cmd->advertising = ADVERTISED_Backplane;
301 cmd->speed = SPEED_UNKNOWN;
302 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 301 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
303 cmd->duplex = DUPLEX_FULL; 302 cmd->duplex = DUPLEX_FULL;
304 cmd->port = PORT_OTHER; 303 cmd->port = PORT_OTHER;
@@ -348,7 +347,7 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
348 memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len); 347 memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
349 348
350 ndev->netdev_ops = &ntb_netdev_ops; 349 ndev->netdev_ops = &ntb_netdev_ops;
351 SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops); 350 ndev->ethtool_ops = &ntb_ethtool_ops;
352 351
353 dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers); 352 dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
354 if (!dev->qp) { 353 if (!dev->qp) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6a17f92153b3..65de0cab8d07 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -24,6 +24,12 @@ config AMD_PHY
24 ---help--- 24 ---help---
25 Currently supports the am79c874 25 Currently supports the am79c874
26 26
27config AMD_XGBE_PHY
28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
29 depends on OF
30 ---help---
31 Currently supports the AMD 10GbE PHY
32
27config MARVELL_PHY 33config MARVELL_PHY
28 tristate "Drivers for Marvell PHYs" 34 tristate "Drivers for Marvell PHYs"
29 ---help--- 35 ---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 07d24024863e..7dc3d5b304cf 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
33obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o 33obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
34obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o 34obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
35obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o 35obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
36obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
new file mode 100644
index 000000000000..b57c22442867
--- /dev/null
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -0,0 +1,1357 @@
1/*
2 * AMD 10Gb Ethernet PHY driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 *
25 * License 2: Modified BSD
26 *
27 * Copyright (c) 2014 Advanced Micro Devices, Inc.
28 * All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in the
36 * documentation and/or other materials provided with the distribution.
37 * * Neither the name of Advanced Micro Devices, Inc. nor the
38 * names of its contributors may be used to endorse or promote products
39 * derived from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
42 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
52
53#include <linux/kernel.h>
54#include <linux/device.h>
55#include <linux/platform_device.h>
56#include <linux/string.h>
57#include <linux/errno.h>
58#include <linux/unistd.h>
59#include <linux/slab.h>
60#include <linux/interrupt.h>
61#include <linux/init.h>
62#include <linux/delay.h>
63#include <linux/netdevice.h>
64#include <linux/etherdevice.h>
65#include <linux/skbuff.h>
66#include <linux/mm.h>
67#include <linux/module.h>
68#include <linux/mii.h>
69#include <linux/ethtool.h>
70#include <linux/phy.h>
71#include <linux/mdio.h>
72#include <linux/io.h>
73#include <linux/of.h>
74#include <linux/of_platform.h>
75#include <linux/of_device.h>
76#include <linux/uaccess.h>
77#include <asm/irq.h>
78
79
80MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
81MODULE_LICENSE("Dual BSD/GPL");
82MODULE_VERSION("1.0.0-a");
83MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
84
85#define XGBE_PHY_ID 0x000162d0
86#define XGBE_PHY_MASK 0xfffffff0
87
88#define XGBE_AN_INT_CMPLT 0x01
89#define XGBE_AN_INC_LINK 0x02
90#define XGBE_AN_PG_RCV 0x04
91
92#define XNP_MCF_NULL_MESSAGE 0x001
93#define XNP_ACK_PROCESSED (1 << 12)
94#define XNP_MP_FORMATTED (1 << 13)
95#define XNP_NP_EXCHANGE (1 << 15)
96
97#ifndef MDIO_PMA_10GBR_PMD_CTRL
98#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
99#endif
100#ifndef MDIO_PMA_10GBR_FEC_CTRL
101#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
102#endif
103#ifndef MDIO_AN_XNP
104#define MDIO_AN_XNP 0x0016
105#endif
106
107#ifndef MDIO_AN_INTMASK
108#define MDIO_AN_INTMASK 0x8001
109#endif
110#ifndef MDIO_AN_INT
111#define MDIO_AN_INT 0x8002
112#endif
113
114#ifndef MDIO_CTRL1_SPEED1G
115#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
116#endif
117
118/* SerDes integration register offsets */
119#define SIR0_STATUS 0x0040
120#define SIR1_SPEED 0x0000
121
122/* SerDes integration register entry bit positions and sizes */
123#define SIR0_STATUS_RX_READY_INDEX 0
124#define SIR0_STATUS_RX_READY_WIDTH 1
125#define SIR0_STATUS_TX_READY_INDEX 8
126#define SIR0_STATUS_TX_READY_WIDTH 1
127#define SIR1_SPEED_DATARATE_INDEX 4
128#define SIR1_SPEED_DATARATE_WIDTH 2
129#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
130#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
131#define SIR1_SPEED_PLLSEL_INDEX 3
132#define SIR1_SPEED_PLLSEL_WIDTH 1
133#define SIR1_SPEED_RATECHANGE_INDEX 6
134#define SIR1_SPEED_RATECHANGE_WIDTH 1
135#define SIR1_SPEED_TXAMP_INDEX 8
136#define SIR1_SPEED_TXAMP_WIDTH 4
137#define SIR1_SPEED_WORDMODE_INDEX 0
138#define SIR1_SPEED_WORDMODE_WIDTH 3
139
140#define SPEED_10000_CDR 0x7
141#define SPEED_10000_PLL 0x1
142#define SPEED_10000_RATE 0x0
143#define SPEED_10000_TXAMP 0xa
144#define SPEED_10000_WORD 0x7
145
146#define SPEED_2500_CDR 0x2
147#define SPEED_2500_PLL 0x0
148#define SPEED_2500_RATE 0x2
149#define SPEED_2500_TXAMP 0xf
150#define SPEED_2500_WORD 0x1
151
152#define SPEED_1000_CDR 0x2
153#define SPEED_1000_PLL 0x0
154#define SPEED_1000_RATE 0x3
155#define SPEED_1000_TXAMP 0xf
156#define SPEED_1000_WORD 0x1
157
158
159/* SerDes RxTx register offsets */
160#define RXTX_REG20 0x0050
161#define RXTX_REG114 0x01c8
162
163/* SerDes RxTx register entry bit positions and sizes */
164#define RXTX_REG20_BLWC_ENA_INDEX 2
165#define RXTX_REG20_BLWC_ENA_WIDTH 1
166#define RXTX_REG114_PQ_REG_INDEX 9
167#define RXTX_REG114_PQ_REG_WIDTH 7
168
169#define RXTX_10000_BLWC 0
170#define RXTX_10000_PQ 0x1e
171
172#define RXTX_2500_BLWC 1
173#define RXTX_2500_PQ 0xa
174
175#define RXTX_1000_BLWC 1
176#define RXTX_1000_PQ 0xa
177
178/* Bit setting and getting macros
179 * The get macro will extract the current bit field value from within
180 * the variable
181 *
182 * The set macro will clear the current bit field value within the
183 * variable and then set the bit field of the variable to the
184 * specified value
185 */
186#define GET_BITS(_var, _index, _width) \
187 (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
188
189#define SET_BITS(_var, _index, _width, _val) \
190do { \
191 (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
192 (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
193} while (0)
194
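/*
 * Worked example (illustrative, not part of the driver): reading and
 * rewriting the 8-bit field that starts at bit 4 of a register value.
 */
	u16 reg = 0x1234;
	u16 field;

	/* GET_BITS(0x1234, 4, 8) == (0x1234 >> 4) & 0xff == 0x23 */
	field = GET_BITS(reg, 4, 8);

	/* SET_BITS clears bits 11:4 first (0x1234 & ~0x0ff0 == 0x1004),
	 * then ORs in the masked value (0xab << 4): reg ends up 0x1ab4.
	 */
	SET_BITS(reg, 4, 8, 0xab);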
195/* Macros for reading or writing SerDes integration registers
196 * The ioread macros will get bit fields or full values using the
197 * register definitions formed using the input names
198 *
199 * The iowrite macros will set bit fields or full values using the
200 * register definitions formed using the input names
201 */
202#define XSIR0_IOREAD(_priv, _reg) \
203 ioread16((_priv)->sir0_regs + _reg)
204
205#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
206 GET_BITS(XSIR0_IOREAD((_priv), _reg), \
207 _reg##_##_field##_INDEX, \
208 _reg##_##_field##_WIDTH)
209
210#define XSIR0_IOWRITE(_priv, _reg, _val) \
211 iowrite16((_val), (_priv)->sir0_regs + _reg)
212
213#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
214do { \
215 u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
216 SET_BITS(reg_val, \
217 _reg##_##_field##_INDEX, \
218 _reg##_##_field##_WIDTH, (_val)); \
219 XSIR0_IOWRITE((_priv), _reg, reg_val); \
220} while (0)
221
222#define XSIR1_IOREAD(_priv, _reg) \
223 ioread16((_priv)->sir1_regs + _reg)
224
225#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
226 GET_BITS(XSIR1_IOREAD((_priv), _reg), \
227 _reg##_##_field##_INDEX, \
228 _reg##_##_field##_WIDTH)
229
230#define XSIR1_IOWRITE(_priv, _reg, _val) \
231 iowrite16((_val), (_priv)->sir1_regs + _reg)
232
233#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
234do { \
235 u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
236 SET_BITS(reg_val, \
237 _reg##_##_field##_INDEX, \
238 _reg##_##_field##_WIDTH, (_val)); \
239 XSIR1_IOWRITE((_priv), _reg, reg_val); \
240} while (0)
241
242
243/* Macros for reading or writing SerDes RxTx registers
244 * The ioread macros will get bit fields or full values using the
245 * register definitions formed using the input names
246 *
247 * The iowrite macros will set bit fields or full values using the
248 * register definitions formed using the input names
249 */
250#define XRXTX_IOREAD(_priv, _reg) \
251 ioread16((_priv)->rxtx_regs + _reg)
252
253#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
254 GET_BITS(XRXTX_IOREAD((_priv), _reg), \
255 _reg##_##_field##_INDEX, \
256 _reg##_##_field##_WIDTH)
257
258#define XRXTX_IOWRITE(_priv, _reg, _val) \
259 iowrite16((_val), (_priv)->rxtx_regs + _reg)
260
261#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
262do { \
263 u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
264 SET_BITS(reg_val, \
265 _reg##_##_field##_INDEX, \
266 _reg##_##_field##_WIDTH, (_val)); \
267 XRXTX_IOWRITE((_priv), _reg, reg_val); \
268} while (0)
269
270
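/*
 * All three families (XSIR0_*, XSIR1_*, XRXTX_*) share this shape; only
 * the base pointer differs. Hand-expanding one call as an illustration
 * (not compiler output): XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1)
 * pastes its arguments into SIR1_SPEED_RATECHANGE_INDEX (6) and
 * SIR1_SPEED_RATECHANGE_WIDTH (1), giving this read-modify-write:
 */
	u16 reg_val = ioread16(priv->sir1_regs + SIR1_SPEED);

	reg_val &= ~(((0x1 << 1) - 1) << 6);		/* clear bit 6 */
	reg_val |= ((1 & ((0x1 << 1) - 1)) << 6);	/* set bit 6 */

	iowrite16(reg_val, priv->sir1_regs + SIR1_SPEED);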
271enum amd_xgbe_phy_an {
272 AMD_XGBE_AN_READY = 0,
273 AMD_XGBE_AN_START,
274 AMD_XGBE_AN_EVENT,
275 AMD_XGBE_AN_PAGE_RECEIVED,
276 AMD_XGBE_AN_INCOMPAT_LINK,
277 AMD_XGBE_AN_COMPLETE,
278 AMD_XGBE_AN_NO_LINK,
279 AMD_XGBE_AN_EXIT,
280 AMD_XGBE_AN_ERROR,
281};
282
283enum amd_xgbe_phy_rx {
284 AMD_XGBE_RX_READY = 0,
285 AMD_XGBE_RX_BPA,
286 AMD_XGBE_RX_XNP,
287 AMD_XGBE_RX_COMPLETE,
288};
289
290enum amd_xgbe_phy_mode {
291 AMD_XGBE_MODE_KR,
292 AMD_XGBE_MODE_KX,
293};
294
295struct amd_xgbe_phy_priv {
296 struct platform_device *pdev;
297 struct device *dev;
298
299 struct phy_device *phydev;
300
301 /* SerDes related mmio resources */
302 struct resource *rxtx_res;
303 struct resource *sir0_res;
304 struct resource *sir1_res;
305
306 /* SerDes related mmio registers */
307 void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
308 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
309 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
310
311 /* Maintain link status for re-starting auto-negotiation */
312 unsigned int link;
313 enum amd_xgbe_phy_mode mode;
314
315 /* Auto-negotiation state machine support */
316 struct mutex an_mutex;
317 enum amd_xgbe_phy_an an_result;
318 enum amd_xgbe_phy_an an_state;
319 enum amd_xgbe_phy_rx kr_state;
320 enum amd_xgbe_phy_rx kx_state;
321 struct work_struct an_work;
322 struct workqueue_struct *an_workqueue;
323};
324
325static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
326{
327 int ret;
328
329 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
330 if (ret < 0)
331 return ret;
332
333 ret |= 0x02;
334 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
335
336 return 0;
337}
338
339static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
340{
341 int ret;
342
343 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
344 if (ret < 0)
345 return ret;
346
347 ret &= ~0x02;
348 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
349
350 return 0;
351}
352
353static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
354{
355 int ret;
356
357 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
358 if (ret < 0)
359 return ret;
360
361 ret |= MDIO_CTRL1_LPOWER;
362 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
363
364 usleep_range(75, 100);
365
366 ret &= ~MDIO_CTRL1_LPOWER;
367 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
368
369 return 0;
370}
371
372static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
373{
374 struct amd_xgbe_phy_priv *priv = phydev->priv;
375
376 /* Assert Rx and Tx ratechange */
377 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
378}
379
380static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
381{
382 struct amd_xgbe_phy_priv *priv = phydev->priv;
383
384 /* Release Rx and Tx ratechange */
385 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
386
387 /* Wait for Rx and Tx ready */
388 while (!XSIR0_IOREAD_BITS(priv, SIR0_STATUS, RX_READY) &&
389 !XSIR0_IOREAD_BITS(priv, SIR0_STATUS, TX_READY))
390 usleep_range(10, 20);
391}
392
393static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
394{
395 struct amd_xgbe_phy_priv *priv = phydev->priv;
396 int ret;
397
398 /* Enable KR training */
399 ret = amd_xgbe_an_enable_kr_training(phydev);
400 if (ret < 0)
401 return ret;
402
403 /* Set PCS to KR/10G speed */
404 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
405 if (ret < 0)
406 return ret;
407
408 ret &= ~MDIO_PCS_CTRL2_TYPE;
409 ret |= MDIO_PCS_CTRL2_10GBR;
410 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
411
412 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
413 if (ret < 0)
414 return ret;
415
416 ret &= ~MDIO_CTRL1_SPEEDSEL;
417 ret |= MDIO_CTRL1_SPEED10G;
418 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
419
420 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
421 if (ret < 0)
422 return ret;
423
424 /* Set SerDes to 10G speed */
425 amd_xgbe_phy_serdes_start_ratechange(phydev);
426
427 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
428 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
429 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
430 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
431 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
432
433 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
434 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
435
436 amd_xgbe_phy_serdes_complete_ratechange(phydev);
437
438 priv->mode = AMD_XGBE_MODE_KR;
439
440 return 0;
441}
442
443static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
444{
445 struct amd_xgbe_phy_priv *priv = phydev->priv;
446 int ret;
447
448 /* Disable KR training */
449 ret = amd_xgbe_an_disable_kr_training(phydev);
450 if (ret < 0)
451 return ret;
452
453 /* Set PCS to KX/1G speed */
454 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
455 if (ret < 0)
456 return ret;
457
458 ret &= ~MDIO_PCS_CTRL2_TYPE;
459 ret |= MDIO_PCS_CTRL2_10GBX;
460 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
461
462 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
463 if (ret < 0)
464 return ret;
465
466 ret &= ~MDIO_CTRL1_SPEEDSEL;
467 ret |= MDIO_CTRL1_SPEED1G;
468 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
469
470 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
471 if (ret < 0)
472 return ret;
473
474 /* Set SerDes to 2.5G speed */
475 amd_xgbe_phy_serdes_start_ratechange(phydev);
476
477 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
478 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
479 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
480 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
481 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
482
483 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
484 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
485
486 amd_xgbe_phy_serdes_complete_ratechange(phydev);
487
488 priv->mode = AMD_XGBE_MODE_KX;
489
490 return 0;
491}
492
493static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
494{
495 struct amd_xgbe_phy_priv *priv = phydev->priv;
496 int ret;
497
498 /* Disable KR training */
499 ret = amd_xgbe_an_disable_kr_training(phydev);
500 if (ret < 0)
501 return ret;
502
503 /* Set PCS to KX/1G speed */
504 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
505 if (ret < 0)
506 return ret;
507
508 ret &= ~MDIO_PCS_CTRL2_TYPE;
509 ret |= MDIO_PCS_CTRL2_10GBX;
510 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
511
512 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
513 if (ret < 0)
514 return ret;
515
516 ret &= ~MDIO_CTRL1_SPEEDSEL;
517 ret |= MDIO_CTRL1_SPEED1G;
518 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
519
520 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
521 if (ret < 0)
522 return ret;
523
524 /* Set SerDes to 1G speed */
525 amd_xgbe_phy_serdes_start_ratechange(phydev);
526
527 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
528 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
529 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
530 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
531 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
532
533 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
534 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
535
536 amd_xgbe_phy_serdes_complete_ratechange(phydev);
537
538 priv->mode = AMD_XGBE_MODE_KX;
539
540 return 0;
541}
542
543static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
544{
545 struct amd_xgbe_phy_priv *priv = phydev->priv;
546 int ret;
547
548	/* If we are in KR, switch to KX, and vice versa */
549 if (priv->mode == AMD_XGBE_MODE_KR)
550 ret = amd_xgbe_phy_gmii_mode(phydev);
551 else
552 ret = amd_xgbe_phy_xgmii_mode(phydev);
553
554 return ret;
555}
556
557static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
558{
559 int ret;
560
561 ret = amd_xgbe_phy_switch_mode(phydev);
562 if (ret < 0)
563 return AMD_XGBE_AN_ERROR;
564
565 return AMD_XGBE_AN_START;
566}
567
568static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
569 enum amd_xgbe_phy_rx *state)
570{
571 struct amd_xgbe_phy_priv *priv = phydev->priv;
572 int ad_reg, lp_reg, ret;
573
574 *state = AMD_XGBE_RX_COMPLETE;
575
576 /* If we're in KX mode then we're done */
577 if (priv->mode == AMD_XGBE_MODE_KX)
578 return AMD_XGBE_AN_EVENT;
579
580 /* Enable/Disable FEC */
581 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
582 if (ad_reg < 0)
583 return AMD_XGBE_AN_ERROR;
584
585 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
586 if (lp_reg < 0)
587 return AMD_XGBE_AN_ERROR;
588
589 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
590 if (ret < 0)
591 return AMD_XGBE_AN_ERROR;
592
593 if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
594 ret |= 0x01;
595 else
596 ret &= ~0x01;
597
598 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
599
600 /* Start KR training */
601 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
602 if (ret < 0)
603 return AMD_XGBE_AN_ERROR;
604
605 ret |= 0x01;
606 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
607
608 return AMD_XGBE_AN_EVENT;
609}
610
611static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
612 enum amd_xgbe_phy_rx *state)
613{
614 u16 msg;
615
616 *state = AMD_XGBE_RX_XNP;
617
618 msg = XNP_MCF_NULL_MESSAGE;
619 msg |= XNP_MP_FORMATTED;
620
621 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
622 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
623 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
624
625 return AMD_XGBE_AN_EVENT;
626}
627
628static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
629 enum amd_xgbe_phy_rx *state)
630{
631 struct amd_xgbe_phy_priv *priv = phydev->priv;
632 unsigned int link_support;
633 int ret, ad_reg, lp_reg;
634
635 /* Read Base Ability register 2 first */
636 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
637 if (ret < 0)
638 return AMD_XGBE_AN_ERROR;
639
640 /* Check for a supported mode, otherwise restart in a different one */
641 link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
642 if (!(ret & link_support))
643 return amd_xgbe_an_switch_mode(phydev);
644
645 /* Check Extended Next Page support */
646 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
647 if (ad_reg < 0)
648 return AMD_XGBE_AN_ERROR;
649
650 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
651 if (lp_reg < 0)
652 return AMD_XGBE_AN_ERROR;
653
654 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
655 amd_xgbe_an_tx_xnp(phydev, state) :
656 amd_xgbe_an_tx_training(phydev, state);
657}
658
659static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
660 enum amd_xgbe_phy_rx *state)
661{
662 int ad_reg, lp_reg;
663
664 /* Check Extended Next Page support */
665 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
666 if (ad_reg < 0)
667 return AMD_XGBE_AN_ERROR;
668
669 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
670 if (lp_reg < 0)
671 return AMD_XGBE_AN_ERROR;
672
673 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
674 amd_xgbe_an_tx_xnp(phydev, state) :
675 amd_xgbe_an_tx_training(phydev, state);
676}
677
678static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
679{
680 struct amd_xgbe_phy_priv *priv = phydev->priv;
681 int ret;
682
683	/* Make sure we aren't looping while trying to negotiate */
684 if (priv->mode == AMD_XGBE_MODE_KR) {
685 if (priv->kr_state != AMD_XGBE_RX_READY)
686 return AMD_XGBE_AN_NO_LINK;
687 priv->kr_state = AMD_XGBE_RX_BPA;
688 } else {
689 if (priv->kx_state != AMD_XGBE_RX_READY)
690 return AMD_XGBE_AN_NO_LINK;
691 priv->kx_state = AMD_XGBE_RX_BPA;
692 }
693
694 /* Set up Advertisement register 3 first */
695 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
696 if (ret < 0)
697 return AMD_XGBE_AN_ERROR;
698
699 if (phydev->supported & SUPPORTED_10000baseR_FEC)
700 ret |= 0xc000;
701 else
702 ret &= ~0xc000;
703
704 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
705
706 /* Set up Advertisement register 2 next */
707 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
708 if (ret < 0)
709 return AMD_XGBE_AN_ERROR;
710
711 if (phydev->supported & SUPPORTED_10000baseKR_Full)
712 ret |= 0x80;
713 else
714 ret &= ~0x80;
715
716 if (phydev->supported & SUPPORTED_1000baseKX_Full)
717 ret |= 0x20;
718 else
719 ret &= ~0x20;
720
721 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
722
723 /* Set up Advertisement register 1 last */
724 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
725 if (ret < 0)
726 return AMD_XGBE_AN_ERROR;
727
728 if (phydev->supported & SUPPORTED_Pause)
729 ret |= 0x400;
730 else
731 ret &= ~0x400;
732
733 if (phydev->supported & SUPPORTED_Asym_Pause)
734 ret |= 0x800;
735 else
736 ret &= ~0x800;
737
738 /* We don't intend to perform XNP */
739 ret &= ~XNP_NP_EXCHANGE;
740
741 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
742
743 /* Enable and start auto-negotiation */
744 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
745
746 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
747 if (ret < 0)
748 return AMD_XGBE_AN_ERROR;
749
750 ret |= MDIO_AN_CTRL1_ENABLE;
751 ret |= MDIO_AN_CTRL1_RESTART;
752 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
753
754 return AMD_XGBE_AN_EVENT;
755}
756
757static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
758{
759 enum amd_xgbe_phy_an new_state;
760 int ret;
761
762 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
763 if (ret < 0)
764 return AMD_XGBE_AN_ERROR;
765
766 new_state = AMD_XGBE_AN_EVENT;
767 if (ret & XGBE_AN_PG_RCV)
768 new_state = AMD_XGBE_AN_PAGE_RECEIVED;
769 else if (ret & XGBE_AN_INC_LINK)
770 new_state = AMD_XGBE_AN_INCOMPAT_LINK;
771 else if (ret & XGBE_AN_INT_CMPLT)
772 new_state = AMD_XGBE_AN_COMPLETE;
773
774 if (new_state != AMD_XGBE_AN_EVENT)
775 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
776
777 return new_state;
778}
779
780static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
781{
782 struct amd_xgbe_phy_priv *priv = phydev->priv;
783 enum amd_xgbe_phy_rx *state;
784 int ret;
785
786 state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
787 : &priv->kx_state;
788
789 switch (*state) {
790 case AMD_XGBE_RX_BPA:
791 ret = amd_xgbe_an_rx_bpa(phydev, state);
792 break;
793
794 case AMD_XGBE_RX_XNP:
795 ret = amd_xgbe_an_rx_xnp(phydev, state);
796 break;
797
798 default:
799 ret = AMD_XGBE_AN_ERROR;
800 }
801
802 return ret;
803}
804
805static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
806{
807 return amd_xgbe_an_switch_mode(phydev);
808}
809
810static void amd_xgbe_an_state_machine(struct work_struct *work)
811{
812 struct amd_xgbe_phy_priv *priv = container_of(work,
813 struct amd_xgbe_phy_priv,
814 an_work);
815 struct phy_device *phydev = priv->phydev;
816 enum amd_xgbe_phy_an cur_state;
817 int sleep;
818
819 while (1) {
820 mutex_lock(&priv->an_mutex);
821
822 cur_state = priv->an_state;
823
824 switch (priv->an_state) {
825 case AMD_XGBE_AN_START:
826 priv->an_state = amd_xgbe_an_start(phydev);
827 break;
828
829 case AMD_XGBE_AN_EVENT:
830 priv->an_state = amd_xgbe_an_event(phydev);
831 break;
832
833 case AMD_XGBE_AN_PAGE_RECEIVED:
834 priv->an_state = amd_xgbe_an_page_received(phydev);
835 break;
836
837 case AMD_XGBE_AN_INCOMPAT_LINK:
838 priv->an_state = amd_xgbe_an_incompat_link(phydev);
839 break;
840
841 case AMD_XGBE_AN_COMPLETE:
842 case AMD_XGBE_AN_NO_LINK:
843 case AMD_XGBE_AN_EXIT:
844 goto exit_unlock;
845
846 default:
847 priv->an_state = AMD_XGBE_AN_ERROR;
848 }
849
850 if (priv->an_state == AMD_XGBE_AN_ERROR) {
851 netdev_err(phydev->attached_dev,
852 "error during auto-negotiation, state=%u\n",
853 cur_state);
854 goto exit_unlock;
855 }
856
857 sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
858
859 mutex_unlock(&priv->an_mutex);
860
861 if (sleep)
862 usleep_range(20, 50);
863 }
864
865exit_unlock:
866 priv->an_result = priv->an_state;
867 priv->an_state = AMD_XGBE_AN_READY;
868
869 mutex_unlock(&priv->an_mutex);
870}
871
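/*
 * Rough transition map, reconstructed from the handlers above (a
 * sketch, not authoritative documentation): the worker loops until it
 * latches a result and parks the state back in AMD_XGBE_AN_READY.
 *
 *   START --------------------------> EVENT (poll MDIO_AN_INT)
 *   EVENT --- page received --------> PAGE_RECEIVED -- BPA/XNP --> EVENT
 *   EVENT --- incompatible link ----> INCOMPAT_LINK -- KR<->KX --> START
 *   EVENT --- AN complete ----------> COMPLETE  (loop exits)
 *   COMPLETE / NO_LINK / EXIT ------> an_result latched, state = READY
 *   any error ----------------------> ERROR (logged, loop exits)
 */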
872static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
873{
874 int count, ret;
875
876 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
877 if (ret < 0)
878 return ret;
879
880 ret |= MDIO_CTRL1_RESET;
881 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
882
883 count = 50;
884 do {
885 msleep(20);
886 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
887 if (ret < 0)
888 return ret;
889 } while ((ret & MDIO_CTRL1_RESET) && --count);
890
891 if (ret & MDIO_CTRL1_RESET)
892 return -ETIMEDOUT;
893
894 return 0;
895}
896
897static int amd_xgbe_phy_config_init(struct phy_device *phydev)
898{
899 /* Initialize supported features */
900 phydev->supported = SUPPORTED_Autoneg;
901 phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
902 phydev->supported |= SUPPORTED_Backplane;
903 phydev->supported |= SUPPORTED_1000baseKX_Full |
904 SUPPORTED_2500baseX_Full;
905 phydev->supported |= SUPPORTED_10000baseKR_Full |
906 SUPPORTED_10000baseR_FEC;
907 phydev->advertising = phydev->supported;
908
909 /* Turn off and clear interrupts */
910 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
911 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
912
913 return 0;
914}
915
916static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
917{
918 int ret;
919
920 /* Disable auto-negotiation */
921 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
922 if (ret < 0)
923 return ret;
924
925 ret &= ~MDIO_AN_CTRL1_ENABLE;
926 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
927
928 /* Validate/Set specified speed */
929 switch (phydev->speed) {
930 case SPEED_10000:
931 ret = amd_xgbe_phy_xgmii_mode(phydev);
932 break;
933
934 case SPEED_2500:
935 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
936 break;
937
938 case SPEED_1000:
939 ret = amd_xgbe_phy_gmii_mode(phydev);
940 break;
941
942 default:
943 ret = -EINVAL;
944 }
945
946 if (ret < 0)
947 return ret;
948
949 /* Validate duplex mode */
950 if (phydev->duplex != DUPLEX_FULL)
951 return -EINVAL;
952
953 phydev->pause = 0;
954 phydev->asym_pause = 0;
955
956 return 0;
957}
958
959static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
960{
961 struct amd_xgbe_phy_priv *priv = phydev->priv;
962 u32 mmd_mask = phydev->c45_ids.devices_in_package;
963 int ret;
964
965 if (phydev->autoneg != AUTONEG_ENABLE)
966 return amd_xgbe_phy_setup_forced(phydev);
967
968 /* Make sure we have the AN MMD present */
969 if (!(mmd_mask & MDIO_DEVS_AN))
970 return -EINVAL;
971
972 /* Get the current speed mode */
973 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
974 if (ret < 0)
975 return ret;
976
977 /* Start/Restart the auto-negotiation state machine */
978 mutex_lock(&priv->an_mutex);
979 priv->an_result = AMD_XGBE_AN_READY;
980 priv->an_state = AMD_XGBE_AN_START;
981 priv->kr_state = AMD_XGBE_RX_READY;
982 priv->kx_state = AMD_XGBE_RX_READY;
983 mutex_unlock(&priv->an_mutex);
984
985 queue_work(priv->an_workqueue, &priv->an_work);
986
987 return 0;
988}
989
990static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
991{
992 struct amd_xgbe_phy_priv *priv = phydev->priv;
993 enum amd_xgbe_phy_an state;
994
995 mutex_lock(&priv->an_mutex);
996 state = priv->an_result;
997 mutex_unlock(&priv->an_mutex);
998
999 return (state == AMD_XGBE_AN_COMPLETE);
1000}
1001
1002static int amd_xgbe_phy_update_link(struct phy_device *phydev)
1003{
1004 struct amd_xgbe_phy_priv *priv = phydev->priv;
1005 enum amd_xgbe_phy_an state;
1006 unsigned int check_again, autoneg;
1007 int ret;
1008
1009 /* If we're doing auto-negotiation don't report link down */
1010 mutex_lock(&priv->an_mutex);
1011 state = priv->an_state;
1012 mutex_unlock(&priv->an_mutex);
1013
1014 if (state != AMD_XGBE_AN_READY) {
1015 phydev->link = 1;
1016 return 0;
1017 }
1018
1019 /* Since the device can be in the wrong mode when a link is
1020 * (re-)established (cable connected after the interface is
1021 * up, etc.), the link status may report no link. If there
1022 * is no link, try switching modes and checking the status
1023 * again.
1024 */
1025 check_again = 1;
1026again:
1027 /* Link status is latched low, so read once to clear
1028 * and then read again to get current state
1029 */
1030 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1031 if (ret < 0)
1032 return ret;
1033
1034 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1035 if (ret < 0)
1036 return ret;
1037
1038 phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
1039
1040 if (!phydev->link) {
1041 ret = amd_xgbe_phy_switch_mode(phydev);
1042 if (check_again) {
1043 check_again = 0;
1044 goto again;
1045 }
1046 }
1047
1048 autoneg = (phydev->link && !priv->link) ? 1 : 0;
1049 priv->link = phydev->link;
1050 if (autoneg) {
1051 /* Link is (back) up, re-start auto-negotiation */
1052 ret = amd_xgbe_phy_config_aneg(phydev);
1053 if (ret < 0)
1054 return ret;
1055 }
1056
1057 return 0;
1058}
1059
1060static int amd_xgbe_phy_read_status(struct phy_device *phydev)
1061{
1062 u32 mmd_mask = phydev->c45_ids.devices_in_package;
1063 int ret, mode, ad_ret, lp_ret;
1064
1065 ret = amd_xgbe_phy_update_link(phydev);
1066 if (ret)
1067 return ret;
1068
1069 mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1070 if (mode < 0)
1071 return mode;
1072 mode &= MDIO_PCS_CTRL2_TYPE;
1073
1074 if (phydev->autoneg == AUTONEG_ENABLE) {
1075 if (!(mmd_mask & MDIO_DEVS_AN))
1076 return -EINVAL;
1077
1078 if (!amd_xgbe_phy_aneg_done(phydev))
1079 return 0;
1080
1081 /* Compare Advertisement and Link Partner register 1 */
1082 ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
1083 if (ad_ret < 0)
1084 return ad_ret;
1085 lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
1086 if (lp_ret < 0)
1087 return lp_ret;
1088
1089 ad_ret &= lp_ret;
1090 phydev->pause = (ad_ret & 0x400) ? 1 : 0;
1091 phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
1092
1093 /* Compare Advertisement and Link Partner register 2 */
1094 ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
1095 MDIO_AN_ADVERTISE + 1);
1096 if (ad_ret < 0)
1097 return ad_ret;
1098 lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
1099 if (lp_ret < 0)
1100 return lp_ret;
1101
1102 ad_ret &= lp_ret;
1103 if (ad_ret & 0x80) {
1104 phydev->speed = SPEED_10000;
1105 if (mode != MDIO_PCS_CTRL2_10GBR) {
1106 ret = amd_xgbe_phy_xgmii_mode(phydev);
1107 if (ret < 0)
1108 return ret;
1109 }
1110 } else {
1111 phydev->speed = SPEED_1000;
1112 if (mode == MDIO_PCS_CTRL2_10GBR) {
1113 ret = amd_xgbe_phy_gmii_mode(phydev);
1114 if (ret < 0)
1115 return ret;
1116 }
1117 }
1118
1119 phydev->duplex = DUPLEX_FULL;
1120 } else {
1121 phydev->speed = (mode == MDIO_PCS_CTRL2_10GBR) ? SPEED_10000
1122 : SPEED_1000;
1123 phydev->duplex = DUPLEX_FULL;
1124 phydev->pause = 0;
1125 phydev->asym_pause = 0;
1126 }
1127
1128 return 0;
1129}
1130
1131static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1132{
1133 int ret;
1134
1135 mutex_lock(&phydev->lock);
1136
1137 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1138 if (ret < 0)
1139 goto unlock;
1140
1141 ret |= MDIO_CTRL1_LPOWER;
1142 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1143
1144 ret = 0;
1145
1146unlock:
1147 mutex_unlock(&phydev->lock);
1148
1149 return ret;
1150}
1151
1152static int amd_xgbe_phy_resume(struct phy_device *phydev)
1153{
1154 int ret;
1155
1156 mutex_lock(&phydev->lock);
1157
1158 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1159 if (ret < 0)
1160 goto unlock;
1161
1162 ret &= ~MDIO_CTRL1_LPOWER;
1163 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1164
1165 ret = 0;
1166
1167unlock:
1168 mutex_unlock(&phydev->lock);
1169
1170 return ret;
1171}
1172
1173static int amd_xgbe_phy_probe(struct phy_device *phydev)
1174{
1175 struct amd_xgbe_phy_priv *priv;
1176 struct platform_device *pdev;
1177 struct device *dev;
1178 char *wq_name;
1179 int ret;
1180
1181 if (!phydev->dev.of_node)
1182 return -EINVAL;
1183
1184 pdev = of_find_device_by_node(phydev->dev.of_node);
1185 if (!pdev)
1186 return -EINVAL;
1187 dev = &pdev->dev;
1188
1189 wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
1190 if (!wq_name) {
1191 ret = -ENOMEM;
1192 goto err_pdev;
1193 }
1194
1195 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1196 if (!priv) {
1197 ret = -ENOMEM;
1198 goto err_name;
1199 }
1200
1201 priv->pdev = pdev;
1202 priv->dev = dev;
1203 priv->phydev = phydev;
1204
1205 /* Get the device mmio areas */
1206 priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1207 priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
1208 if (IS_ERR(priv->rxtx_regs)) {
1209 dev_err(dev, "rxtx ioremap failed\n");
1210 ret = PTR_ERR(priv->rxtx_regs);
1211 goto err_priv;
1212 }
1213
1214 priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1215 priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
1216 if (IS_ERR(priv->sir0_regs)) {
1217 dev_err(dev, "sir0 ioremap failed\n");
1218 ret = PTR_ERR(priv->sir0_regs);
1219 goto err_rxtx;
1220 }
1221
1222 priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1223 priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
1224 if (IS_ERR(priv->sir1_regs)) {
1225 dev_err(dev, "sir1 ioremap failed\n");
1226 ret = PTR_ERR(priv->sir1_regs);
1227 goto err_sir0;
1228 }
1229
1230 priv->link = 1;
1231
1232 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1233 if (ret < 0)
1234 goto err_sir1;
1235 if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
1236 priv->mode = AMD_XGBE_MODE_KR;
1237 else
1238 priv->mode = AMD_XGBE_MODE_KX;
1239
1240 mutex_init(&priv->an_mutex);
1241 INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
1242 priv->an_workqueue = create_singlethread_workqueue(wq_name);
1243 if (!priv->an_workqueue) {
1244 ret = -ENOMEM;
1245 goto err_sir1;
1246 }
1247
1248 phydev->priv = priv;
1249
1250 kfree(wq_name);
1251 of_dev_put(pdev);
1252
1253 return 0;
1254
1255err_sir1:
1256 devm_iounmap(dev, priv->sir1_regs);
1257 devm_release_mem_region(dev, priv->sir1_res->start,
1258 resource_size(priv->sir1_res));
1259
1260err_sir0:
1261 devm_iounmap(dev, priv->sir0_regs);
1262 devm_release_mem_region(dev, priv->sir0_res->start,
1263 resource_size(priv->sir0_res));
1264
1265err_rxtx:
1266 devm_iounmap(dev, priv->rxtx_regs);
1267 devm_release_mem_region(dev, priv->rxtx_res->start,
1268 resource_size(priv->rxtx_res));
1269
1270err_priv:
1271 devm_kfree(dev, priv);
1272
1273err_name:
1274 kfree(wq_name);
1275
1276err_pdev:
1277 of_dev_put(pdev);
1278
1279 return ret;
1280}
1281
1282static void amd_xgbe_phy_remove(struct phy_device *phydev)
1283{
1284 struct amd_xgbe_phy_priv *priv = phydev->priv;
1285 struct device *dev = priv->dev;
1286
1287	/* Stop any in-progress auto-negotiation */
1288 mutex_lock(&priv->an_mutex);
1289 priv->an_state = AMD_XGBE_AN_EXIT;
1290 mutex_unlock(&priv->an_mutex);
1291
1292 flush_workqueue(priv->an_workqueue);
1293 destroy_workqueue(priv->an_workqueue);
1294
1295 /* Release resources */
1296 devm_iounmap(dev, priv->sir1_regs);
1297 devm_release_mem_region(dev, priv->sir1_res->start,
1298 resource_size(priv->sir1_res));
1299
1300 devm_iounmap(dev, priv->sir0_regs);
1301 devm_release_mem_region(dev, priv->sir0_res->start,
1302 resource_size(priv->sir0_res));
1303
1304 devm_iounmap(dev, priv->rxtx_regs);
1305 devm_release_mem_region(dev, priv->rxtx_res->start,
1306 resource_size(priv->rxtx_res));
1307
1308 devm_kfree(dev, priv);
1309}
1310
1311static int amd_xgbe_match_phy_device(struct phy_device *phydev)
1312{
1313 return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
1314}
1315
1316static struct phy_driver amd_xgbe_phy_driver[] = {
1317 {
1318 .phy_id = XGBE_PHY_ID,
1319 .phy_id_mask = XGBE_PHY_MASK,
1320 .name = "AMD XGBE PHY",
1321 .features = 0,
1322 .probe = amd_xgbe_phy_probe,
1323 .remove = amd_xgbe_phy_remove,
1324 .soft_reset = amd_xgbe_phy_soft_reset,
1325 .config_init = amd_xgbe_phy_config_init,
1326 .suspend = amd_xgbe_phy_suspend,
1327 .resume = amd_xgbe_phy_resume,
1328 .config_aneg = amd_xgbe_phy_config_aneg,
1329 .aneg_done = amd_xgbe_phy_aneg_done,
1330 .read_status = amd_xgbe_phy_read_status,
1331 .match_phy_device = amd_xgbe_match_phy_device,
1332 .driver = {
1333 .owner = THIS_MODULE,
1334 },
1335 },
1336};
1337
1338static int __init amd_xgbe_phy_init(void)
1339{
1340 return phy_drivers_register(amd_xgbe_phy_driver,
1341 ARRAY_SIZE(amd_xgbe_phy_driver));
1342}
1343
1344static void __exit amd_xgbe_phy_exit(void)
1345{
1346 phy_drivers_unregister(amd_xgbe_phy_driver,
1347 ARRAY_SIZE(amd_xgbe_phy_driver));
1348}
1349
1350module_init(amd_xgbe_phy_init);
1351module_exit(amd_xgbe_phy_exit);
1352
1353static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
1354 { XGBE_PHY_ID, XGBE_PHY_MASK },
1355 { }
1356};
1357MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 643464d5a727..6c622aedbae1 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -144,41 +144,11 @@ static int at803x_resume(struct phy_device *phydev)
144 144
145static int at803x_config_init(struct phy_device *phydev) 145static int at803x_config_init(struct phy_device *phydev)
146{ 146{
147 int val;
148 int ret; 147 int ret;
149 u32 features;
150
151 features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
152 SUPPORTED_FIBRE | SUPPORTED_BNC;
153
154 val = phy_read(phydev, MII_BMSR);
155 if (val < 0)
156 return val;
157
158 if (val & BMSR_ANEGCAPABLE)
159 features |= SUPPORTED_Autoneg;
160 if (val & BMSR_100FULL)
161 features |= SUPPORTED_100baseT_Full;
162 if (val & BMSR_100HALF)
163 features |= SUPPORTED_100baseT_Half;
164 if (val & BMSR_10FULL)
165 features |= SUPPORTED_10baseT_Full;
166 if (val & BMSR_10HALF)
167 features |= SUPPORTED_10baseT_Half;
168
169 if (val & BMSR_ESTATEN) {
170 val = phy_read(phydev, MII_ESTATUS);
171 if (val < 0)
172 return val;
173
174 if (val & ESTATUS_1000_TFULL)
175 features |= SUPPORTED_1000baseT_Full;
176 if (val & ESTATUS_1000_THALF)
177 features |= SUPPORTED_1000baseT_Half;
178 }
179 148
180 phydev->supported = features; 149 ret = genphy_config_init(phydev);
181 phydev->advertising = features; 150 if (ret < 0)
151 return ret;
182 152
183 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { 153 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
184 ret = phy_write(phydev, AT803X_DEBUG_ADDR, 154 ret = phy_write(phydev, AT803X_DEBUG_ADDR,
@@ -283,8 +253,7 @@ static int __init atheros_init(void)
283 253
284static void __exit atheros_exit(void) 254static void __exit atheros_exit(void)
285{ 255{
286 return phy_drivers_unregister(at803x_driver, 256 phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
287 ARRAY_SIZE(at803x_driver));
288} 257}
289 258
290module_init(atheros_init); 259module_init(atheros_init);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index ba55adfc7aae..d60d875cb445 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -21,6 +21,7 @@
21#include <linux/phy_fixed.h> 21#include <linux/phy_fixed.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/of.h>
24 25
25#define MII_REGS_NUM 29 26#define MII_REGS_NUM 29
26 27
@@ -31,7 +32,7 @@ struct fixed_mdio_bus {
31}; 32};
32 33
33struct fixed_phy { 34struct fixed_phy {
34 int id; 35 int addr;
35 u16 regs[MII_REGS_NUM]; 36 u16 regs[MII_REGS_NUM];
36 struct phy_device *phydev; 37 struct phy_device *phydev;
37 struct fixed_phy_status status; 38 struct fixed_phy_status status;
@@ -104,8 +105,8 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
104 if (fp->status.asym_pause) 105 if (fp->status.asym_pause)
105 lpa |= LPA_PAUSE_ASYM; 106 lpa |= LPA_PAUSE_ASYM;
106 107
107 fp->regs[MII_PHYSID1] = fp->id >> 16; 108 fp->regs[MII_PHYSID1] = 0;
108 fp->regs[MII_PHYSID2] = fp->id; 109 fp->regs[MII_PHYSID2] = 0;
109 110
110 fp->regs[MII_BMSR] = bmsr; 111 fp->regs[MII_BMSR] = bmsr;
111 fp->regs[MII_BMCR] = bmcr; 112 fp->regs[MII_BMCR] = bmcr;
@@ -115,7 +116,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
115 return 0; 116 return 0;
116} 117}
117 118
118static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num) 119static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
119{ 120{
120 struct fixed_mdio_bus *fmb = bus->priv; 121 struct fixed_mdio_bus *fmb = bus->priv;
121 struct fixed_phy *fp; 122 struct fixed_phy *fp;
@@ -124,7 +125,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
124 return -1; 125 return -1;
125 126
126 list_for_each_entry(fp, &fmb->phys, node) { 127 list_for_each_entry(fp, &fmb->phys, node) {
127 if (fp->id == phy_id) { 128 if (fp->addr == phy_addr) {
128 /* Issue callback if user registered it. */ 129 /* Issue callback if user registered it. */
129 if (fp->link_update) { 130 if (fp->link_update) {
130 fp->link_update(fp->phydev->attached_dev, 131 fp->link_update(fp->phydev->attached_dev,
@@ -138,7 +139,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
138 return 0xFFFF; 139 return 0xFFFF;
139} 140}
140 141
141static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num, 142static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
142 u16 val) 143 u16 val)
143{ 144{
144 return 0; 145 return 0;
@@ -160,7 +161,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
160 return -EINVAL; 161 return -EINVAL;
161 162
162 list_for_each_entry(fp, &fmb->phys, node) { 163 list_for_each_entry(fp, &fmb->phys, node) {
163 if (fp->id == phydev->phy_id) { 164 if (fp->addr == phydev->addr) {
164 fp->link_update = link_update; 165 fp->link_update = link_update;
165 fp->phydev = phydev; 166 fp->phydev = phydev;
166 return 0; 167 return 0;
@@ -171,7 +172,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
171} 172}
172EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); 173EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
173 174
174int fixed_phy_add(unsigned int irq, int phy_id, 175int fixed_phy_add(unsigned int irq, int phy_addr,
175 struct fixed_phy_status *status) 176 struct fixed_phy_status *status)
176{ 177{
177 int ret; 178 int ret;
@@ -184,9 +185,9 @@ int fixed_phy_add(unsigned int irq, int phy_id,
184 185
185 memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); 186 memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
186 187
187 fmb->irqs[phy_id] = irq; 188 fmb->irqs[phy_addr] = irq;
188 189
189 fp->id = phy_id; 190 fp->addr = phy_addr;
190 fp->status = *status; 191 fp->status = *status;
191 192
192 ret = fixed_phy_update_regs(fp); 193 ret = fixed_phy_update_regs(fp);
@@ -203,6 +204,66 @@ err_regs:
203} 204}
204EXPORT_SYMBOL_GPL(fixed_phy_add); 205EXPORT_SYMBOL_GPL(fixed_phy_add);
205 206
207void fixed_phy_del(int phy_addr)
208{
209 struct fixed_mdio_bus *fmb = &platform_fmb;
210 struct fixed_phy *fp, *tmp;
211
212 list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
213 if (fp->addr == phy_addr) {
214 list_del(&fp->node);
215 kfree(fp);
216 return;
217 }
218 }
219}
220EXPORT_SYMBOL_GPL(fixed_phy_del);
221
222static int phy_fixed_addr;
223static DEFINE_SPINLOCK(phy_fixed_addr_lock);
224
225int fixed_phy_register(unsigned int irq,
226 struct fixed_phy_status *status,
227 struct device_node *np)
228{
229 struct fixed_mdio_bus *fmb = &platform_fmb;
230 struct phy_device *phy;
231 int phy_addr;
232 int ret;
233
234 /* Get the next available PHY address, up to PHY_MAX_ADDR */
235 spin_lock(&phy_fixed_addr_lock);
236 if (phy_fixed_addr == PHY_MAX_ADDR) {
237 spin_unlock(&phy_fixed_addr_lock);
238 return -ENOSPC;
239 }
240 phy_addr = phy_fixed_addr++;
241 spin_unlock(&phy_fixed_addr_lock);
242
243 ret = fixed_phy_add(PHY_POLL, phy_addr, status);
244 if (ret < 0)
245 return ret;
246
247 phy = get_phy_device(fmb->mii_bus, phy_addr, false);
248 if (!phy || IS_ERR(phy)) {
249 fixed_phy_del(phy_addr);
250 return -EINVAL;
251 }
252
253 of_node_get(np);
254 phy->dev.of_node = np;
255
256 ret = phy_device_register(phy);
257 if (ret) {
258 phy_device_free(phy);
259 of_node_put(np);
260 fixed_phy_del(phy_addr);
261 return ret;
262 }
263
264 return 0;
265}
266
206static int __init fixed_mdio_bus_init(void) 267static int __init fixed_mdio_bus_init(void)
207{ 268{
208 struct fixed_mdio_bus *fmb = &platform_fmb; 269 struct fixed_mdio_bus *fmb = &platform_fmb;
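
The new fixed_phy_register() rolls address allocation, fixed_phy_add() and phy_device_register() into a single call, so a MAC driver can stand up a software PHY straight from a fixed-link device-tree node. A minimal caller sketch, with a hypothetical example_ helper and illustrative link parameters:

	/* sketch: create a 1Gbit/full-duplex fixed link described by link_np */
	static int example_create_fixed_link(struct device_node *link_np)
	{
		struct fixed_phy_status status = {
			.link	= 1,
			.speed	= 1000,
			.duplex	= 1,
		};

		/* grabs the next free address on the fixed MDIO bus, registers
		 * the phy_device, and attaches link_np as its of_node
		 */
		return fixed_phy_register(PHY_POLL, &status, link_np);
	}
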
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 76f54b32a120..2e58aa54484c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -69,6 +69,73 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
69} 69}
70EXPORT_SYMBOL(mdiobus_alloc_size); 70EXPORT_SYMBOL(mdiobus_alloc_size);
71 71
72static void _devm_mdiobus_free(struct device *dev, void *res)
73{
74 mdiobus_free(*(struct mii_bus **)res);
75}
76
77static int devm_mdiobus_match(struct device *dev, void *res, void *data)
78{
79 struct mii_bus **r = res;
80
81 if (WARN_ON(!r || !*r))
82 return 0;
83
84 return *r == data;
85}
86
87/**
88 * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
89 * @dev: Device to allocate mii_bus for
90 * @sizeof_priv: Space to allocate for private structure.
91 *
92 * Managed mdiobus_alloc_size. mii_bus allocated with this function is
93 * automatically freed on driver detach.
94 *
95 * If an mii_bus allocated with this function needs to be freed separately,
96 * devm_mdiobus_free() must be used.
97 *
98 * RETURNS:
99 * Pointer to allocated mii_bus on success, NULL on failure.
100 */
101struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
102{
103 struct mii_bus **ptr, *bus;
104
105 ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
106 if (!ptr)
107 return NULL;
108
109 /* use raw alloc_dr for kmalloc caller tracing */
110 bus = mdiobus_alloc_size(sizeof_priv);
111 if (bus) {
112 *ptr = bus;
113 devres_add(dev, ptr);
114 } else {
115 devres_free(ptr);
116 }
117
118 return bus;
119}
120EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
121
122/**
123 * devm_mdiobus_free - Resource-managed mdiobus_free()
124 * @dev: Device this mii_bus belongs to
125 * @bus: the mii_bus associated with the device
126 *
127 * Free mii_bus allocated with devm_mdiobus_alloc_size().
128 */
129void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
130{
131 int rc;
132
133 rc = devres_release(dev, _devm_mdiobus_free,
134 devm_mdiobus_match, bus);
135 WARN_ON(rc);
136}
137EXPORT_SYMBOL_GPL(devm_mdiobus_free);
138
72/** 139/**
73 * mdiobus_release - mii_bus device release callback 140 * mdiobus_release - mii_bus device release callback
74 * @d: the target struct device that contains the mii_bus 141 * @d: the target struct device that contains the mii_bus
@@ -233,6 +300,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
233 if (IS_ERR(phydev) || phydev == NULL) 300 if (IS_ERR(phydev) || phydev == NULL)
234 return phydev; 301 return phydev;
235 302
303 /*
 304	 * For DT, see if the auto-probed phy has a corresponding child
305 * in the bus node, and set the of_node pointer in this case.
306 */
307 of_mdiobus_link_phydev(bus, phydev);
308
236 err = phy_device_register(phydev); 309 err = phy_device_register(phydev);
237 if (err) { 310 if (err) {
238 phy_device_free(phydev); 311 phy_device_free(phydev);
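
devm_mdiobus_alloc_size() follows the usual devres pattern: the bus allocation is released automatically on driver detach, which removes mdiobus_free() from every error path. A probe() sketch under that assumption (all example_ names are hypothetical; note that mdiobus_unregister() remains the driver's job):

	struct example_priv {
		void __iomem *regs;
	};

	static int example_mdio_probe(struct platform_device *pdev)
	{
		struct mii_bus *bus;

		bus = devm_mdiobus_alloc_size(&pdev->dev,
					      sizeof(struct example_priv));
		if (!bus)
			return -ENOMEM;

		bus->name = "example-mdio";
		bus->parent = &pdev->dev;
		snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
		/* bus->read/bus->write hooks elided */

		/* no mdiobus_free() anywhere: _devm_mdiobus_free() runs when
		 * the driver is unbound; only unregistration stays manual
		 */
		return mdiobus_register(bus);
	}
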
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index d849684231c1..bc7c7d2f75f2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -283,6 +283,110 @@ static int ksz9021_config_init(struct phy_device *phydev)
283 return 0; 283 return 0;
284} 284}
285 285
286#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d
287#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e
288#define OP_DATA 1
289#define KSZ9031_PS_TO_REG 60
290
291/* Extended registers */
292#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
293#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
294#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
295#define MII_KSZ9031RN_CLK_PAD_SKEW 8
296
297static int ksz9031_extended_write(struct phy_device *phydev,
298 u8 mode, u32 dev_addr, u32 regnum, u16 val)
299{
300 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
301 phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
302 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
303 return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
304}
305
306static int ksz9031_extended_read(struct phy_device *phydev,
307 u8 mode, u32 dev_addr, u32 regnum)
308{
309 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
310 phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
311 phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
312 return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
313}
314
315static int ksz9031_of_load_skew_values(struct phy_device *phydev,
316 struct device_node *of_node,
317 u16 reg, size_t field_sz,
318 char *field[], u8 numfields)
319{
320 int val[4] = {-1, -2, -3, -4};
321 int matches = 0;
322 u16 mask;
323 u16 maxval;
324 u16 newval;
325 int i;
326
327 for (i = 0; i < numfields; i++)
328 if (!of_property_read_u32(of_node, field[i], val + i))
329 matches++;
330
331 if (!matches)
332 return 0;
333
334 if (matches < numfields)
335 newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
336 else
337 newval = 0;
338
339 maxval = (field_sz == 4) ? 0xf : 0x1f;
340 for (i = 0; i < numfields; i++)
341 if (val[i] != -(i + 1)) {
342 mask = 0xffff;
343 mask ^= maxval << (field_sz * i);
344 newval = (newval & mask) |
345 (((val[i] / KSZ9031_PS_TO_REG) & maxval)
346 << (field_sz * i));
347 }
348
349 return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
350}
351
352static int ksz9031_config_init(struct phy_device *phydev)
353{
354 struct device *dev = &phydev->dev;
355 struct device_node *of_node = dev->of_node;
356 char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
357 char *rx_data_skews[4] = {
358 "rxd0-skew-ps", "rxd1-skew-ps",
359 "rxd2-skew-ps", "rxd3-skew-ps"
360 };
361 char *tx_data_skews[4] = {
362 "txd0-skew-ps", "txd1-skew-ps",
363 "txd2-skew-ps", "txd3-skew-ps"
364 };
365 char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
366
367 if (!of_node && dev->parent->of_node)
368 of_node = dev->parent->of_node;
369
370 if (of_node) {
371 ksz9031_of_load_skew_values(phydev, of_node,
372 MII_KSZ9031RN_CLK_PAD_SKEW, 5,
373 clk_skews, 2);
374
375 ksz9031_of_load_skew_values(phydev, of_node,
376 MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
377 control_skews, 2);
378
379 ksz9031_of_load_skew_values(phydev, of_node,
380 MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
381 rx_data_skews, 4);
382
383 ksz9031_of_load_skew_values(phydev, of_node,
384 MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
385 tx_data_skews, 4);
386 }
387 return 0;
388}
389
286#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 390#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
287#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6) 391#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
288#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4) 392#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
@@ -469,7 +573,7 @@ static struct phy_driver ksphy_driver[] = {
469 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause 573 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
470 | SUPPORTED_Asym_Pause), 574 | SUPPORTED_Asym_Pause),
471 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 575 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
472 .config_init = kszphy_config_init, 576 .config_init = ksz9031_config_init,
473 .config_aneg = genphy_config_aneg, 577 .config_aneg = genphy_config_aneg,
474 .read_status = genphy_read_status, 578 .read_status = genphy_read_status,
475 .ack_interrupt = kszphy_ack_interrupt, 579 .ack_interrupt = kszphy_ack_interrupt,
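
The ksz9031 helpers use the standard indirect MMD sequence (device address to reg 0x0d, register number to 0x0e, OP_DATA | device back to 0x0d, then data through 0x0e), and each skew field stores its delay in units of KSZ9031_PS_TO_REG = 60 ps, so an "rxc-skew-ps" of 420 becomes register value 7. Written out by hand for just that field (a sketch; the read-modify-write mirrors ksz9031_of_load_skew_values() above):

	static int example_set_rxc_skew_420ps(struct phy_device *phydev)
	{
		int val;

		/* the clock pad skew register packs two 5-bit fields:
		 * rxc in bits 4:0, txc in bits 9:5
		 */
		val = ksz9031_extended_read(phydev, OP_DATA, 2,
					    MII_KSZ9031RN_CLK_PAD_SKEW);
		val = (val & ~0x1f) | (420 / KSZ9031_PS_TO_REG);	/* = 7 */
		return ksz9031_extended_write(phydev, OP_DATA, 2,
					      MII_KSZ9031RN_CLK_PAD_SKEW, val);
	}
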
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4987a1c6dc52..35d753d22f78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -33,6 +33,7 @@
33#include <linux/mdio.h> 33#include <linux/mdio.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/uaccess.h> 35#include <linux/uaccess.h>
36#include <linux/of.h>
36 37
37#include <asm/irq.h> 38#include <asm/irq.h>
38 39
@@ -1067,14 +1068,11 @@ int genphy_soft_reset(struct phy_device *phydev)
1067} 1068}
1068EXPORT_SYMBOL(genphy_soft_reset); 1069EXPORT_SYMBOL(genphy_soft_reset);
1069 1070
1070static int genphy_config_init(struct phy_device *phydev) 1071int genphy_config_init(struct phy_device *phydev)
1071{ 1072{
1072 int val; 1073 int val;
1073 u32 features; 1074 u32 features;
1074 1075
1075 /* For now, I'll claim that the generic driver supports
1076 * all possible port types
1077 */
1078 features = (SUPPORTED_TP | SUPPORTED_MII 1076 features = (SUPPORTED_TP | SUPPORTED_MII
1079 | SUPPORTED_AUI | SUPPORTED_FIBRE | 1077 | SUPPORTED_AUI | SUPPORTED_FIBRE |
1080 SUPPORTED_BNC); 1078 SUPPORTED_BNC);
@@ -1107,8 +1105,8 @@ static int genphy_config_init(struct phy_device *phydev)
1107 features |= SUPPORTED_1000baseT_Half; 1105 features |= SUPPORTED_1000baseT_Half;
1108 } 1106 }
1109 1107
1110 phydev->supported = features; 1108 phydev->supported &= features;
1111 phydev->advertising = features; 1109 phydev->advertising &= features;
1112 1110
1113 return 0; 1111 return 0;
1114} 1112}
@@ -1118,6 +1116,7 @@ static int gen10g_soft_reset(struct phy_device *phydev)
1118 /* Do nothing for now */ 1116 /* Do nothing for now */
1119 return 0; 1117 return 0;
1120} 1118}
1119EXPORT_SYMBOL(genphy_config_init);
1121 1120
1122static int gen10g_config_init(struct phy_device *phydev) 1121static int gen10g_config_init(struct phy_device *phydev)
1123{ 1122{
@@ -1168,6 +1167,38 @@ static int gen10g_resume(struct phy_device *phydev)
1168 return 0; 1167 return 0;
1169} 1168}
1170 1169
1170static void of_set_phy_supported(struct phy_device *phydev)
1171{
1172 struct device_node *node = phydev->dev.of_node;
1173 u32 max_speed;
1174
1175 if (!IS_ENABLED(CONFIG_OF_MDIO))
1176 return;
1177
1178 if (!node)
1179 return;
1180
1181 if (!of_property_read_u32(node, "max-speed", &max_speed)) {
1182 /* The default values for phydev->supported are provided by the PHY
1183		 * driver "features" member; we want to reset to sane defaults first
1184 * before supporting higher speeds.
1185 */
1186 phydev->supported &= PHY_DEFAULT_FEATURES;
1187
1188 switch (max_speed) {
1189 default:
1190 return;
1191
1192 case SPEED_1000:
1193 phydev->supported |= PHY_1000BT_FEATURES;
1194 case SPEED_100:
1195 phydev->supported |= PHY_100BT_FEATURES;
1196 case SPEED_10:
1197 phydev->supported |= PHY_10BT_FEATURES;
1198 }
1199 }
1200}
1201
1171/** 1202/**
1172 * phy_probe - probe and init a PHY device 1203 * phy_probe - probe and init a PHY device
1173 * @dev: device to probe and init 1204 * @dev: device to probe and init
@@ -1202,7 +1233,8 @@ static int phy_probe(struct device *dev)
1202 * or both of these values 1233 * or both of these values
1203 */ 1234 */
1204 phydev->supported = phydrv->features; 1235 phydev->supported = phydrv->features;
1205 phydev->advertising = phydrv->features; 1236 of_set_phy_supported(phydev);
1237 phydev->advertising = phydev->supported;
1206 1238
1207 /* Set the state to READY by default */ 1239 /* Set the state to READY by default */
1208 phydev->state = PHY_READY; 1240 phydev->state = PHY_READY;
@@ -1295,7 +1327,9 @@ static struct phy_driver genphy_driver[] = {
1295 .name = "Generic PHY", 1327 .name = "Generic PHY",
1296 .soft_reset = genphy_soft_reset, 1328 .soft_reset = genphy_soft_reset,
1297 .config_init = genphy_config_init, 1329 .config_init = genphy_config_init,
1298 .features = 0, 1330 .features = PHY_GBIT_FEATURES | SUPPORTED_MII |
1331 SUPPORTED_AUI | SUPPORTED_FIBRE |
1332 SUPPORTED_BNC,
1299 .config_aneg = genphy_config_aneg, 1333 .config_aneg = genphy_config_aneg,
1300 .aneg_done = genphy_aneg_done, 1334 .aneg_done = genphy_aneg_done,
1301 .read_status = genphy_read_status, 1335 .read_status = genphy_read_status,
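
The switch in of_set_phy_supported() uses deliberate fall-through, so each max-speed value enables its own feature group plus every slower one. Spelled out for a node carrying max-speed = <100> (illustrative only):

	u32 supported = PHY_DEFAULT_FEATURES;	/* reset to sane defaults */
	supported |= PHY_100BT_FEATURES;	/* case SPEED_100 ... */
	supported |= PHY_10BT_FEATURES;		/* ... falls through to SPEED_10 */
	/* PHY_1000BT_FEATURES stays clear: SPEED_1000 never matched */
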
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index fa1d69a38ccf..45483fdfbe06 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -64,65 +64,51 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
64 return err; 64 return err;
65} 65}
66 66
67/* RTL8201CP */ 67static struct phy_driver realtek_drvs[] = {
68static struct phy_driver rtl8201cp_driver = { 68 {
69 .phy_id = 0x00008201, 69 .phy_id = 0x00008201,
70 .name = "RTL8201CP Ethernet", 70 .name = "RTL8201CP Ethernet",
71 .phy_id_mask = 0x0000ffff, 71 .phy_id_mask = 0x0000ffff,
72 .features = PHY_BASIC_FEATURES, 72 .features = PHY_BASIC_FEATURES,
73 .flags = PHY_HAS_INTERRUPT, 73 .flags = PHY_HAS_INTERRUPT,
74 .config_aneg = &genphy_config_aneg, 74 .config_aneg = &genphy_config_aneg,
75 .read_status = &genphy_read_status, 75 .read_status = &genphy_read_status,
76 .driver = { .owner = THIS_MODULE,}, 76 .driver = { .owner = THIS_MODULE,},
77}; 77 }, {
78 78 .phy_id = 0x001cc912,
79/* RTL8211B */ 79 .name = "RTL8211B Gigabit Ethernet",
80static struct phy_driver rtl8211b_driver = { 80 .phy_id_mask = 0x001fffff,
81 .phy_id = 0x001cc912, 81 .features = PHY_GBIT_FEATURES,
82 .name = "RTL8211B Gigabit Ethernet", 82 .flags = PHY_HAS_INTERRUPT,
83 .phy_id_mask = 0x001fffff, 83 .config_aneg = &genphy_config_aneg,
84 .features = PHY_GBIT_FEATURES, 84 .read_status = &genphy_read_status,
85 .flags = PHY_HAS_INTERRUPT, 85 .ack_interrupt = &rtl821x_ack_interrupt,
86 .config_aneg = &genphy_config_aneg, 86 .config_intr = &rtl8211b_config_intr,
87 .read_status = &genphy_read_status, 87 .driver = { .owner = THIS_MODULE,},
88 .ack_interrupt = &rtl821x_ack_interrupt, 88 }, {
89 .config_intr = &rtl8211b_config_intr, 89 .phy_id = 0x001cc915,
90 .driver = { .owner = THIS_MODULE,}, 90 .name = "RTL8211E Gigabit Ethernet",
91}; 91 .phy_id_mask = 0x001fffff,
92 92 .features = PHY_GBIT_FEATURES,
93/* RTL8211E */ 93 .flags = PHY_HAS_INTERRUPT,
94static struct phy_driver rtl8211e_driver = { 94 .config_aneg = &genphy_config_aneg,
95 .phy_id = 0x001cc915, 95 .read_status = &genphy_read_status,
96 .name = "RTL8211E Gigabit Ethernet", 96 .ack_interrupt = &rtl821x_ack_interrupt,
97 .phy_id_mask = 0x001fffff, 97 .config_intr = &rtl8211e_config_intr,
98 .features = PHY_GBIT_FEATURES, 98 .suspend = genphy_suspend,
99 .flags = PHY_HAS_INTERRUPT, 99 .resume = genphy_resume,
100 .config_aneg = &genphy_config_aneg, 100 .driver = { .owner = THIS_MODULE,},
101 .read_status = &genphy_read_status, 101 },
102 .ack_interrupt = &rtl821x_ack_interrupt,
103 .config_intr = &rtl8211e_config_intr,
104 .suspend = genphy_suspend,
105 .resume = genphy_resume,
106 .driver = { .owner = THIS_MODULE,},
107}; 102};
108 103
109static int __init realtek_init(void) 104static int __init realtek_init(void)
110{ 105{
111 int ret; 106 return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs));
112
113 ret = phy_driver_register(&rtl8201cp_driver);
114 if (ret < 0)
115 return -ENODEV;
116 ret = phy_driver_register(&rtl8211b_driver);
117 if (ret < 0)
118 return -ENODEV;
119 return phy_driver_register(&rtl8211e_driver);
120} 107}
121 108
122static void __exit realtek_exit(void) 109static void __exit realtek_exit(void)
123{ 110{
124 phy_driver_unregister(&rtl8211b_driver); 111 phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs));
125 phy_driver_unregister(&rtl8211e_driver);
126} 112}
127 113
128module_init(realtek_init); 114module_init(realtek_init);
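
Beyond shrinking the file, the table form fixes a latent unbalance visible above: the old realtek_exit() never unregistered rtl8201cp_driver, and a failure partway through realtek_init() left the earlier drivers registered. With phy_drivers_register()/phy_drivers_unregister() the array is handled as a unit. The same pattern for a hypothetical driver (the phy_id is made up):

	static struct phy_driver example_drvs[] = {
		{
			.phy_id		= 0x00112233,
			.phy_id_mask	= 0x001fffff,
			.name		= "Example PHY",
			.features	= PHY_BASIC_FEATURES,
			.config_aneg	= genphy_config_aneg,
			.read_status	= genphy_read_status,
			.driver		= { .owner = THIS_MODULE, },
		},
	};

	static int __init example_init(void)
	{
		return phy_drivers_register(example_drvs, ARRAY_SIZE(example_drvs));
	}

	static void __exit example_exit(void)
	{
		phy_drivers_unregister(example_drvs, ARRAY_SIZE(example_drvs));
	}
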
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 11f34813e23f..180c49479c42 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -249,8 +249,7 @@ static int __init smsc_init(void)
249 249
250static void __exit smsc_exit(void) 250static void __exit smsc_exit(void)
251{ 251{
252 return phy_drivers_unregister(smsc_phy_driver, 252 phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
253 ARRAY_SIZE(smsc_phy_driver));
254} 253}
255 254
256MODULE_DESCRIPTION("SMSC PHY driver"); 255MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 14372c65a7e8..5dc0935da99c 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -319,8 +319,7 @@ static int __init vsc82xx_init(void)
319 319
320static void __exit vsc82xx_exit(void) 320static void __exit vsc82xx_exit(void)
321{ 321{
322 return phy_drivers_unregister(vsc82xx_driver, 322 phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
323 ARRAY_SIZE(vsc82xx_driver));
324} 323}
325 324
326module_init(vsc82xx_init); 325module_init(vsc82xx_init);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e3923ebb693f..91d6c1272fcf 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,7 +757,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
757 757
758 err = get_filter(argp, &code); 758 err = get_filter(argp, &code);
759 if (err >= 0) { 759 if (err >= 0) {
760 struct sock_fprog fprog = { 760 struct sock_fprog_kern fprog = {
761 .len = err, 761 .len = err,
762 .filter = code, 762 .filter = code,
763 }; 763 };
@@ -778,7 +778,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
778 778
779 err = get_filter(argp, &code); 779 err = get_filter(argp, &code);
780 if (err >= 0) { 780 if (err >= 0) {
781 struct sock_fprog fprog = { 781 struct sock_fprog_kern fprog = {
782 .len = err, 782 .len = err,
783 .filter = code, 783 .filter = code,
784 }; 784 };
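
struct sock_fprog_kern has the same layout as struct sock_fprog but types .filter as a plain kernel pointer rather than a __user one, so kernel-built classic BPF programs no longer need casts that hide the address space. A minimal sketch of the convention (the always-accept program is illustrative, and the create call reflects the in-tree API as of this merge; treat the snippet as an assumption, not a quote from this commit):

	struct sock_filter code[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept whole packet */
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(code),
		.filter	= code,		/* kernel memory, no __user cast */
	};
	struct sk_filter *fp;
	int err = sk_unattached_filter_create(&fp, &fprog);
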
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 01805319e1e0..1aff970be33e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
281 nf_reset(skb); 281 nf_reset(skb);
282 282
283 skb->ip_summed = CHECKSUM_NONE; 283 skb->ip_summed = CHECKSUM_NONE;
284 ip_select_ident(skb, &rt->dst, NULL); 284 ip_select_ident(skb, NULL);
285 ip_send_check(iph); 285 ip_send_check(iph);
286 286
287 ip_local_out(skb); 287 ip_local_out(skb);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a8497183ff8b..dac7a0d9bb46 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -494,7 +494,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
494 ndev->mtu = RIO_MAX_MSG_SIZE - 14; 494 ndev->mtu = RIO_MAX_MSG_SIZE - 14;
495 ndev->features = NETIF_F_LLTX; 495 ndev->features = NETIF_F_LLTX;
496 SET_NETDEV_DEV(ndev, &mport->dev); 496 SET_NETDEV_DEV(ndev, &mport->dev);
497 SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); 497 ndev->ethtool_ops = &rionet_ethtool_ops;
498 498
499 spin_lock_init(&rnet->lock); 499 spin_lock_init(&rnet->lock);
500 spin_lock_init(&rnet->tx_lock); 500 spin_lock_init(&rnet->tx_lock);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ce4989be86d9..b4958c7ffa84 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -968,7 +968,7 @@ static void team_port_disable(struct team *team,
968static void __team_compute_features(struct team *team) 968static void __team_compute_features(struct team *team)
969{ 969{
970 struct team_port *port; 970 struct team_port *port;
971 u32 vlan_features = TEAM_VLAN_FEATURES; 971 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
972 unsigned short max_hard_header_len = ETH_HLEN; 972 unsigned short max_hard_header_len = ETH_HLEN;
973 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; 973 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
974 974
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index dbde3412ee5e..a58dfebb5512 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -49,7 +49,7 @@ struct lb_port_mapping {
49struct lb_priv_ex { 49struct lb_priv_ex {
50 struct team *team; 50 struct team *team;
51 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE]; 51 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
52 struct sock_fprog *orig_fprog; 52 struct sock_fprog_kern *orig_fprog;
53 struct { 53 struct {
54 unsigned int refresh_interval; /* in tenths of second */ 54 unsigned int refresh_interval; /* in tenths of second */
55 struct delayed_work refresh_dw; 55 struct delayed_work refresh_dw;
@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
241 return 0; 241 return 0;
242} 242}
243 243
244static int __fprog_create(struct sock_fprog **pfprog, u32 data_len, 244static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
245 const void *data) 245 const void *data)
246{ 246{
247 struct sock_fprog *fprog; 247 struct sock_fprog_kern *fprog;
248 struct sock_filter *filter = (struct sock_filter *) data; 248 struct sock_filter *filter = (struct sock_filter *) data;
249 249
250 if (data_len % sizeof(struct sock_filter)) 250 if (data_len % sizeof(struct sock_filter))
251 return -EINVAL; 251 return -EINVAL;
252 fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL); 252 fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
253 if (!fprog) 253 if (!fprog)
254 return -ENOMEM; 254 return -ENOMEM;
255 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL); 255 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
262 return 0; 262 return 0;
263} 263}
264 264
265static void __fprog_destroy(struct sock_fprog *fprog) 265static void __fprog_destroy(struct sock_fprog_kern *fprog)
266{ 266{
267 kfree(fprog->filter); 267 kfree(fprog->filter);
268 kfree(fprog); 268 kfree(fprog);
@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
273 struct lb_priv *lb_priv = get_lb_priv(team); 273 struct lb_priv *lb_priv = get_lb_priv(team);
274 struct sk_filter *fp = NULL; 274 struct sk_filter *fp = NULL;
275 struct sk_filter *orig_fp; 275 struct sk_filter *orig_fp;
276 struct sock_fprog *fprog = NULL; 276 struct sock_fprog_kern *fprog = NULL;
277 int err; 277 int err;
278 278
279 if (ctx->data.bin_val.len) { 279 if (ctx->data.bin_val.len) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ee328ba101e7..98bad1fb1bfb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -498,12 +498,12 @@ static void tun_detach_all(struct net_device *dev)
498 for (i = 0; i < n; i++) { 498 for (i = 0; i < n; i++) {
499 tfile = rtnl_dereference(tun->tfiles[i]); 499 tfile = rtnl_dereference(tun->tfiles[i]);
500 BUG_ON(!tfile); 500 BUG_ON(!tfile);
501 wake_up_all(&tfile->wq.wait); 501 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
502 RCU_INIT_POINTER(tfile->tun, NULL); 502 RCU_INIT_POINTER(tfile->tun, NULL);
503 --tun->numqueues; 503 --tun->numqueues;
504 } 504 }
505 list_for_each_entry(tfile, &tun->disabled, next) { 505 list_for_each_entry(tfile, &tun->disabled, next) {
506 wake_up_all(&tfile->wq.wait); 506 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
507 RCU_INIT_POINTER(tfile->tun, NULL); 507 RCU_INIT_POINTER(tfile->tun, NULL);
508 } 508 }
509 BUG_ON(tun->numqueues != 0); 509 BUG_ON(tun->numqueues != 0);
@@ -807,8 +807,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
807 /* Notify and wake up reader process */ 807 /* Notify and wake up reader process */
808 if (tfile->flags & TUN_FASYNC) 808 if (tfile->flags & TUN_FASYNC)
809 kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 809 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
810 wake_up_interruptible_poll(&tfile->wq.wait, POLLIN | 810 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
811 POLLRDNORM | POLLRDBAND);
812 811
813 rcu_read_unlock(); 812 rcu_read_unlock();
814 return NETDEV_TX_OK; 813 return NETDEV_TX_OK;
@@ -965,7 +964,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
965 964
966 tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 965 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
967 966
968 poll_wait(file, &tfile->wq.wait, wait); 967 poll_wait(file, sk_sleep(sk), wait);
969 968
970 if (!skb_queue_empty(&sk->sk_receive_queue)) 969 if (!skb_queue_empty(&sk->sk_receive_queue))
971 mask |= POLLIN | POLLRDNORM; 970 mask |= POLLIN | POLLRDNORM;
@@ -1330,47 +1329,26 @@ done:
1330static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 1329static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1331 const struct iovec *iv, ssize_t len, int noblock) 1330 const struct iovec *iv, ssize_t len, int noblock)
1332{ 1331{
1333 DECLARE_WAITQUEUE(wait, current);
1334 struct sk_buff *skb; 1332 struct sk_buff *skb;
1335 ssize_t ret = 0; 1333 ssize_t ret = 0;
1334 int peeked, err, off = 0;
1336 1335
1337 tun_debug(KERN_INFO, tun, "tun_do_read\n"); 1336 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1338 1337
1339 if (unlikely(!noblock)) 1338 if (!len)
1340 add_wait_queue(&tfile->wq.wait, &wait); 1339 return ret;
1341 while (len) {
1342 if (unlikely(!noblock))
1343 current->state = TASK_INTERRUPTIBLE;
1344 1340
1345 /* Read frames from the queue */ 1341 if (tun->dev->reg_state != NETREG_REGISTERED)
1346 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { 1342 return -EIO;
1347 if (noblock) {
1348 ret = -EAGAIN;
1349 break;
1350 }
1351 if (signal_pending(current)) {
1352 ret = -ERESTARTSYS;
1353 break;
1354 }
1355 if (tun->dev->reg_state != NETREG_REGISTERED) {
1356 ret = -EIO;
1357 break;
1358 }
1359
1360 /* Nothing to read, let's sleep */
1361 schedule();
1362 continue;
1363 }
1364 1343
1344 /* Read frames from queue */
1345 skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
1346 &peeked, &off, &err);
1347 if (skb) {
1365 ret = tun_put_user(tun, tfile, skb, iv, len); 1348 ret = tun_put_user(tun, tfile, skb, iv, len);
1366 kfree_skb(skb); 1349 kfree_skb(skb);
1367 break; 1350 } else
1368 } 1351 ret = err;
1369
1370 if (unlikely(!noblock)) {
1371 current->state = TASK_RUNNING;
1372 remove_wait_queue(&tfile->wq.wait, &wait);
1373 }
1374 1352
1375 return ret; 1353 return ret;
1376} 1354}
@@ -2199,8 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2199 tfile->flags = 0; 2177 tfile->flags = 0;
2200 tfile->ifindex = 0; 2178 tfile->ifindex = 0;
2201 2179
2202 rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2203 init_waitqueue_head(&tfile->wq.wait); 2180 init_waitqueue_head(&tfile->wq.wait);
2181 RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
2204 2182
2205 tfile->socket.file = file; 2183 tfile->socket.file = file;
2206 tfile->socket.ops = &tun_socket_ops; 2184 tfile->socket.ops = &tun_socket_ops;
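
tun now rides the socket layer instead of a hand-rolled waitqueue: producers call sk->sk_data_ready(sk) after queueing, and readers block inside __skb_recv_datagram(), which sleeps on sk_sleep(sk) and handles signals and MSG_DONTWAIT itself. The heart of the new read path, reduced to a sketch:

	int peeked, err, off = 0;
	struct sk_buff *skb;

	/* returns NULL with err set to -EAGAIN (non-blocking) or
	 * -ERESTARTSYS (signal) in place of the old explicit wait loop
	 */
	skb = __skb_recv_datagram(sk, noblock ? MSG_DONTWAIT : 0,
				  &peeked, &off, &err);
	if (!skb)
		return err;
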
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 630caf48f63a..8cfc3bb0c6a6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -793,7 +793,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
793 793
794 netdev->netdev_ops = &catc_netdev_ops; 794 netdev->netdev_ops = &catc_netdev_ops;
795 netdev->watchdog_timeo = TX_TIMEOUT; 795 netdev->watchdog_timeo = TX_TIMEOUT;
796 SET_ETHTOOL_OPS(netdev, &ops); 796 netdev->ethtool_ops = &ops;
797 797
798 catc->usbdev = usbdev; 798 catc->usbdev = usbdev;
799 catc->netdev = netdev; 799 catc->netdev = netdev;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 2e025ddcef21..5ee7a1dbc023 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -24,13 +24,21 @@
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <net/addrconf.h> 25#include <net/addrconf.h>
26 26
27/* alternative VLAN for IP session 0 if not untagged */
28#define MBIM_IPS0_VID 4094
29
27/* driver specific data - must match cdc_ncm usage */ 30/* driver specific data - must match cdc_ncm usage */
28struct cdc_mbim_state { 31struct cdc_mbim_state {
29 struct cdc_ncm_ctx *ctx; 32 struct cdc_ncm_ctx *ctx;
30 atomic_t pmcount; 33 atomic_t pmcount;
31 struct usb_driver *subdriver; 34 struct usb_driver *subdriver;
32 struct usb_interface *control; 35 unsigned long _unused;
33 struct usb_interface *data; 36 unsigned long flags;
37};
38
39/* flags for the cdc_mbim_state.flags field */
40enum cdc_mbim_flags {
41 FLAG_IPS0_VLAN = 1 << 0, /* IP session 0 is tagged */
34}; 42};
35 43
36/* using a counter to merge subdriver requests with our own into a combined state */ 44/* using a counter to merge subdriver requests with our own into a combined state */
@@ -62,16 +70,91 @@ static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
62 return cdc_mbim_manage_power(dev, status); 70 return cdc_mbim_manage_power(dev, status);
63} 71}
64 72
73static int cdc_mbim_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
74{
75 struct usbnet *dev = netdev_priv(netdev);
76 struct cdc_mbim_state *info = (void *)&dev->data;
77
78 /* creation of this VLAN is a request to tag IP session 0 */
79 if (vid == MBIM_IPS0_VID)
80 info->flags |= FLAG_IPS0_VLAN;
81 else
82 if (vid >= 512) /* we don't map these to MBIM session */
83 return -EINVAL;
84 return 0;
85}
86
87static int cdc_mbim_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
88{
89 struct usbnet *dev = netdev_priv(netdev);
90 struct cdc_mbim_state *info = (void *)&dev->data;
91
92 /* this is a request for an untagged IP session 0 */
93 if (vid == MBIM_IPS0_VID)
94 info->flags &= ~FLAG_IPS0_VLAN;
95 return 0;
96}
97
98static const struct net_device_ops cdc_mbim_netdev_ops = {
99 .ndo_open = usbnet_open,
100 .ndo_stop = usbnet_stop,
101 .ndo_start_xmit = usbnet_start_xmit,
102 .ndo_tx_timeout = usbnet_tx_timeout,
103 .ndo_change_mtu = usbnet_change_mtu,
104 .ndo_set_mac_address = eth_mac_addr,
105 .ndo_validate_addr = eth_validate_addr,
106 .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
107 .ndo_vlan_rx_kill_vid = cdc_mbim_rx_kill_vid,
108};
109
110/* Change the control interface altsetting and update the .driver_info
111 * pointer if the matching entry after changing class codes points to
112 * a different struct
113 */
114static int cdc_mbim_set_ctrlalt(struct usbnet *dev, struct usb_interface *intf, u8 alt)
115{
116 struct usb_driver *driver = to_usb_driver(intf->dev.driver);
117 const struct usb_device_id *id;
118 struct driver_info *info;
119 int ret;
120
121 ret = usb_set_interface(dev->udev,
122 intf->cur_altsetting->desc.bInterfaceNumber,
123 alt);
124 if (ret)
125 return ret;
126
127 id = usb_match_id(intf, driver->id_table);
128 if (!id)
129 return -ENODEV;
130
131 info = (struct driver_info *)id->driver_info;
132 if (info != dev->driver_info) {
133 dev_dbg(&intf->dev, "driver_info updated to '%s'\n",
134 info->description);
135 dev->driver_info = info;
136 }
137 return 0;
138}
65 139
66static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) 140static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
67{ 141{
68 struct cdc_ncm_ctx *ctx; 142 struct cdc_ncm_ctx *ctx;
69 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 143 struct usb_driver *subdriver = ERR_PTR(-ENODEV);
70 int ret = -ENODEV; 144 int ret = -ENODEV;
71 u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); 145 u8 data_altsetting = 1;
72 struct cdc_mbim_state *info = (void *)&dev->data; 146 struct cdc_mbim_state *info = (void *)&dev->data;
73 147
74 /* Probably NCM, defer for cdc_ncm_bind */ 148 /* should we change control altsetting on a NCM/MBIM function? */
149 if (cdc_ncm_select_altsetting(intf) == CDC_NCM_COMM_ALTSETTING_MBIM) {
150 data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
151 ret = cdc_mbim_set_ctrlalt(dev, intf, CDC_NCM_COMM_ALTSETTING_MBIM);
152 if (ret)
153 goto err;
154 ret = -ENODEV;
155 }
156
157 /* we will hit this for NCM/MBIM functions if prefer_mbim is false */
75 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
76 goto err; 159 goto err;
77 160
@@ -101,7 +184,10 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
101 dev->net->flags |= IFF_NOARP; 184 dev->net->flags |= IFF_NOARP;
102 185
103 /* no need to put the VLAN tci in the packet headers */ 186 /* no need to put the VLAN tci in the packet headers */
104 dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX; 187 dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER;
188
189 /* monitor VLAN additions and removals */
190 dev->net->netdev_ops = &cdc_mbim_netdev_ops;
105err: 191err:
106 return ret; 192 return ret;
107} 193}
@@ -164,12 +250,24 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
164 skb_pull(skb, ETH_HLEN); 250 skb_pull(skb, ETH_HLEN);
165 } 251 }
166 252
253 /* Is IP session <0> tagged too? */
254 if (info->flags & FLAG_IPS0_VLAN) {
255 /* drop all untagged packets */
256 if (!tci)
257 goto error;
258 /* map MBIM_IPS0_VID to IPS<0> */
259 if (tci == MBIM_IPS0_VID)
260 tci = 0;
261 }
262
167 /* mapping VLANs to MBIM sessions: 263 /* mapping VLANs to MBIM sessions:
168 * no tag => IPS session <0> 264 * no tag => IPS session <0> if !FLAG_IPS0_VLAN
169 * 1 - 255 => IPS session <vlanid> 265 * 1 - 255 => IPS session <vlanid>
170 * 256 - 511 => DSS session <vlanid - 256> 266 * 256 - 511 => DSS session <vlanid - 256>
171 * 512 - 4095 => unsupported, drop 267 * 512 - 4093 => unsupported, drop
268 * 4094 => IPS session <0> if FLAG_IPS0_VLAN
172 */ 269 */
270
173 switch (tci & 0x0f00) { 271 switch (tci & 0x0f00) {
174 case 0x0000: /* VLAN ID 0 - 255 */ 272 case 0x0000: /* VLAN ID 0 - 255 */
175 if (!is_ip) 273 if (!is_ip)
@@ -178,6 +276,8 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
178 c[3] = tci; 276 c[3] = tci;
179 break; 277 break;
180 case 0x0100: /* VLAN ID 256 - 511 */ 278 case 0x0100: /* VLAN ID 256 - 511 */
279 if (is_ip)
280 goto error;
181 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN); 281 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
182 c = (u8 *)&sign; 282 c = (u8 *)&sign;
183 c[3] = tci; 283 c[3] = tci;
@@ -223,8 +323,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
223 /* need to send the NA on the VLAN dev, if any */ 323 /* need to send the NA on the VLAN dev, if any */
224 rcu_read_lock(); 324 rcu_read_lock();
225 if (tci) { 325 if (tci) {
226 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), 326 netdev = __vlan_find_dev_deep_rcu(dev->net, htons(ETH_P_8021Q),
227 tci); 327 tci);
228 if (!netdev) { 328 if (!netdev) {
229 rcu_read_unlock(); 329 rcu_read_unlock();
230 return; 330 return;
@@ -268,7 +368,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
268 __be16 proto = htons(ETH_P_802_3); 368 __be16 proto = htons(ETH_P_802_3);
269 struct sk_buff *skb = NULL; 369 struct sk_buff *skb = NULL;
270 370
271 if (tci < 256) { /* IPS session? */ 371 if (tci < 256 || tci == MBIM_IPS0_VID) { /* IPS session? */
272 if (len < sizeof(struct iphdr)) 372 if (len < sizeof(struct iphdr))
273 goto err; 373 goto err;
274 374
@@ -320,6 +420,7 @@ static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
320 struct usb_cdc_ncm_dpe16 *dpe16; 420 struct usb_cdc_ncm_dpe16 *dpe16;
321 int ndpoffset; 421 int ndpoffset;
322 int loopcount = 50; /* arbitrary max preventing infinite loop */ 422 int loopcount = 50; /* arbitrary max preventing infinite loop */
423 u32 payload = 0;
323 u8 *c; 424 u8 *c;
324 u16 tci; 425 u16 tci;
325 426
@@ -338,6 +439,9 @@ next_ndp:
338 case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN): 439 case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
339 c = (u8 *)&ndp16->dwSignature; 440 c = (u8 *)&ndp16->dwSignature;
340 tci = c[3]; 441 tci = c[3];
442 /* tag IPS<0> packets too if MBIM_IPS0_VID exists */
443 if (!tci && info->flags & FLAG_IPS0_VLAN)
444 tci = MBIM_IPS0_VID;
341 break; 445 break;
342 case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN): 446 case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
343 c = (u8 *)&ndp16->dwSignature; 447 c = (u8 *)&ndp16->dwSignature;
@@ -379,6 +483,7 @@ next_ndp:
379 if (!skb) 483 if (!skb)
380 goto error; 484 goto error;
381 usbnet_skb_return(dev, skb); 485 usbnet_skb_return(dev, skb);
486 payload += len; /* count payload bytes in this NTB */
382 } 487 }
383 } 488 }
384err_ndp: 489err_ndp:
@@ -387,6 +492,10 @@ err_ndp:
387 if (ndpoffset && loopcount--) 492 if (ndpoffset && loopcount--)
388 goto next_ndp; 493 goto next_ndp;
389 494
495 /* update stats */
496 ctx->rx_overhead += skb_in->len - payload;
497 ctx->rx_ntbs++;
498
390 return 1; 499 return 1;
391error: 500error:
392 return 0; 501 return 0;
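
The VLAN-to-session mapping spread across cdc_mbim_tx_fixup() can be restated as one pure function. A sketch that folds IPS and DSS into a single return value (the real code distinguishes them by NDP signature):

	/* returns the session number within its type, or -1 to drop */
	static int example_tci_to_session(u16 tci, bool ips0_tagged)
	{
		if (tci == 0)				/* untagged frame */
			return ips0_tagged ? -1 : 0;	/* IPS<0> only when untagged */
		if (tci == MBIM_IPS0_VID)		/* 4094 */
			return ips0_tagged ? 0 : -1;	/* IPS<0> only when tagged */
		if (tci <= 255)
			return tci;			/* IPS session <tci> */
		if (tci <= 511)
			return tci - 256;		/* DSS session <tci - 256> */
		return -1;				/* 512 - 4093: unsupported */
	}
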
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 9a2bd11943eb..80a844e0ae03 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -65,19 +65,384 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
65static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); 65static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
66static struct usb_driver cdc_ncm_driver; 66static struct usb_driver cdc_ncm_driver;
67 67
68static int cdc_ncm_setup(struct usbnet *dev) 68struct cdc_ncm_stats {
69 char stat_string[ETH_GSTRING_LEN];
70 int sizeof_stat;
71 int stat_offset;
72};
73
74#define CDC_NCM_STAT(str, m) { \
75 .stat_string = str, \
76 .sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \
77 .stat_offset = offsetof(struct cdc_ncm_ctx, m) }
78#define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m)
79
80static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = {
81 CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full),
82 CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full),
83 CDC_NCM_SIMPLE_STAT(tx_reason_timeout),
84 CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram),
85 CDC_NCM_SIMPLE_STAT(tx_overhead),
86 CDC_NCM_SIMPLE_STAT(tx_ntbs),
87 CDC_NCM_SIMPLE_STAT(rx_overhead),
88 CDC_NCM_SIMPLE_STAT(rx_ntbs),
89};
90
91static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset)
92{
93 switch (sset) {
94 case ETH_SS_STATS:
95 return ARRAY_SIZE(cdc_ncm_gstrings_stats);
96 default:
97 return -EOPNOTSUPP;
98 }
99}
100
101static void cdc_ncm_get_ethtool_stats(struct net_device *netdev,
102 struct ethtool_stats __always_unused *stats,
103 u64 *data)
104{
105 struct usbnet *dev = netdev_priv(netdev);
106 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
107 int i;
108 char *p = NULL;
109
110 for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
111 p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
112 data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
113 }
114}
115
116static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data)
117{
118 u8 *p = data;
119 int i;
120
121 switch (stringset) {
122 case ETH_SS_STATS:
123 for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
124 memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
125 p += ETH_GSTRING_LEN;
126 }
127 }
128}
129
130static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
131
132static const struct ethtool_ops cdc_ncm_ethtool_ops = {
133 .get_settings = usbnet_get_settings,
134 .set_settings = usbnet_set_settings,
135 .get_link = usbnet_get_link,
136 .nway_reset = usbnet_nway_reset,
137 .get_drvinfo = usbnet_get_drvinfo,
138 .get_msglevel = usbnet_get_msglevel,
139 .set_msglevel = usbnet_set_msglevel,
140 .get_ts_info = ethtool_op_get_ts_info,
141 .get_sset_count = cdc_ncm_get_sset_count,
142 .get_strings = cdc_ncm_get_strings,
143 .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
144};
145
146static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
147{
148 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
149 u32 val, max, min;
150
151 /* clamp new_rx to sane values */
152 min = USB_CDC_NCM_NTB_MIN_IN_SIZE;
153 max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
154
155 /* dwNtbInMaxSize spec violation? Use MIN size for both limits */
156 if (max < min) {
157 dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n",
158 le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min);
159 max = min;
160 }
161
162 val = clamp_t(u32, new_rx, min, max);
163 if (val != new_rx)
164 dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range\n", min, max);
165
166 return val;
167}
168
169static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
170{
171 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
172 u32 val, max, min;
173
174 /* clamp new_tx to sane values */
175 min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
176 max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
177
178 /* some devices set dwNtbOutMaxSize too low for the above default */
179 min = min(min, max);
180
181 val = clamp_t(u32, new_tx, min, max);
182 if (val != new_tx)
183 dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range\n", min, max);
184
185 return val;
186}
187
188static ssize_t cdc_ncm_show_min_tx_pkt(struct device *d, struct device_attribute *attr, char *buf)
189{
190 struct usbnet *dev = netdev_priv(to_net_dev(d));
191 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
192
193 return sprintf(buf, "%u\n", ctx->min_tx_pkt);
194}
195
196static ssize_t cdc_ncm_show_rx_max(struct device *d, struct device_attribute *attr, char *buf)
197{
198 struct usbnet *dev = netdev_priv(to_net_dev(d));
199 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
200
201 return sprintf(buf, "%u\n", ctx->rx_max);
202}
203
204static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
205{
206 struct usbnet *dev = netdev_priv(to_net_dev(d));
207 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
208
209 return sprintf(buf, "%u\n", ctx->tx_max);
210}
211
212static ssize_t cdc_ncm_show_tx_timer_usecs(struct device *d, struct device_attribute *attr, char *buf)
213{
214 struct usbnet *dev = netdev_priv(to_net_dev(d));
215 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
216
217 return sprintf(buf, "%u\n", ctx->timer_interval / (u32)NSEC_PER_USEC);
218}
219
220static ssize_t cdc_ncm_store_min_tx_pkt(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
221{
222 struct usbnet *dev = netdev_priv(to_net_dev(d));
223 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
224 unsigned long val;
225
226 /* no need to restrict values - anything from 0 to infinity is OK */
227 if (kstrtoul(buf, 0, &val))
228 return -EINVAL;
229
230 ctx->min_tx_pkt = val;
231 return len;
232}
233
234static ssize_t cdc_ncm_store_rx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
235{
236 struct usbnet *dev = netdev_priv(to_net_dev(d));
237 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
238 unsigned long val;
239
240 if (kstrtoul(buf, 0, &val) || cdc_ncm_check_rx_max(dev, val) != val)
241 return -EINVAL;
242
243 cdc_ncm_update_rxtx_max(dev, val, ctx->tx_max);
244 return len;
245}
246
247static ssize_t cdc_ncm_store_tx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
69{ 248{
249 struct usbnet *dev = netdev_priv(to_net_dev(d));
70 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; 250 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
251 unsigned long val;
252
253 if (kstrtoul(buf, 0, &val) || cdc_ncm_check_tx_max(dev, val) != val)
254 return -EINVAL;
255
256 cdc_ncm_update_rxtx_max(dev, ctx->rx_max, val);
257 return len;
258}
259
260static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
261{
262 struct usbnet *dev = netdev_priv(to_net_dev(d));
263 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
264 ssize_t ret;
265 unsigned long val;
266
267 ret = kstrtoul(buf, 0, &val);
268 if (ret)
269 return ret;
270 if (val && (val < CDC_NCM_TIMER_INTERVAL_MIN || val > CDC_NCM_TIMER_INTERVAL_MAX))
271 return -EINVAL;
272
273 spin_lock_bh(&ctx->mtx);
274 ctx->timer_interval = val * NSEC_PER_USEC;
275 if (!ctx->timer_interval)
276 ctx->tx_timer_pending = 0;
277 spin_unlock_bh(&ctx->mtx);
278 return len;
279}
280
281static DEVICE_ATTR(min_tx_pkt, S_IRUGO | S_IWUSR, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt);
282static DEVICE_ATTR(rx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max);
283static DEVICE_ATTR(tx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max);
284static DEVICE_ATTR(tx_timer_usecs, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs);
285
286#define NCM_PARM_ATTR(name, format, tocpu) \
287static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *attr, char *buf) \
288{ \
289 struct usbnet *dev = netdev_priv(to_net_dev(d)); \
290 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \
291 return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \
292} \
293static DEVICE_ATTR(name, S_IRUGO, cdc_ncm_show_##name, NULL)
294
295NCM_PARM_ATTR(bmNtbFormatsSupported, "0x%04x", le16_to_cpu);
296NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu);
297NCM_PARM_ATTR(wNdpInDivisor, "%u", le16_to_cpu);
298NCM_PARM_ATTR(wNdpInPayloadRemainder, "%u", le16_to_cpu);
299NCM_PARM_ATTR(wNdpInAlignment, "%u", le16_to_cpu);
300NCM_PARM_ATTR(dwNtbOutMaxSize, "%u", le32_to_cpu);
301NCM_PARM_ATTR(wNdpOutDivisor, "%u", le16_to_cpu);
302NCM_PARM_ATTR(wNdpOutPayloadRemainder, "%u", le16_to_cpu);
303NCM_PARM_ATTR(wNdpOutAlignment, "%u", le16_to_cpu);
304NCM_PARM_ATTR(wNtbOutMaxDatagrams, "%u", le16_to_cpu);
305
306static struct attribute *cdc_ncm_sysfs_attrs[] = {
307 &dev_attr_min_tx_pkt.attr,
308 &dev_attr_rx_max.attr,
309 &dev_attr_tx_max.attr,
310 &dev_attr_tx_timer_usecs.attr,
311 &dev_attr_bmNtbFormatsSupported.attr,
312 &dev_attr_dwNtbInMaxSize.attr,
313 &dev_attr_wNdpInDivisor.attr,
314 &dev_attr_wNdpInPayloadRemainder.attr,
315 &dev_attr_wNdpInAlignment.attr,
316 &dev_attr_dwNtbOutMaxSize.attr,
317 &dev_attr_wNdpOutDivisor.attr,
318 &dev_attr_wNdpOutPayloadRemainder.attr,
319 &dev_attr_wNdpOutAlignment.attr,
320 &dev_attr_wNtbOutMaxDatagrams.attr,
321 NULL,
322};
323
324static struct attribute_group cdc_ncm_sysfs_attr_group = {
325 .name = "cdc_ncm",
326 .attrs = cdc_ncm_sysfs_attrs,
327};
328
329/* handle rx_max and tx_max changes */
330static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
331{
332 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
333 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
71 u32 val; 334 u32 val;
72 u8 flags;
73 u8 iface_no;
74 int err;
75 int eth_hlen;
76 u16 mbim_mtu;
77 u16 ntb_fmt_supported;
78 __le16 max_datagram_size;
79 335
80 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 336 val = cdc_ncm_check_rx_max(dev, new_rx);
337
338 /* inform device about NTB input size changes */
339 if (val != ctx->rx_max) {
340 __le32 dwNtbInMaxSize = cpu_to_le32(val);
341
342 dev_info(&dev->intf->dev, "setting rx_max = %u\n", val);
343
344 /* tell device to use new size */
345 if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
346 USB_TYPE_CLASS | USB_DIR_OUT
347 | USB_RECIP_INTERFACE,
348 0, iface_no, &dwNtbInMaxSize, 4) < 0)
349 dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
350 else
351 ctx->rx_max = val;
352 }
353
 354	 /* usbnet uses these values for sizing rx queues */
355 if (dev->rx_urb_size != ctx->rx_max) {
356 dev->rx_urb_size = ctx->rx_max;
357 if (netif_running(dev->net))
358 usbnet_unlink_rx_urbs(dev);
359 }
360
361 val = cdc_ncm_check_tx_max(dev, new_tx);
362 if (val != ctx->tx_max)
363 dev_info(&dev->intf->dev, "setting tx_max = %u\n", val);
364
365 /* Adding a pad byte here if necessary simplifies the handling
366 * in cdc_ncm_fill_tx_frame, making tx_max always represent
367 * the real skb max size.
368 *
369 * We cannot use dev->maxpacket here because this is called from
370 * .bind which is called before usbnet sets up dev->maxpacket
371 */
372 if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
373 val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
374 val++;
375
376 /* we might need to flush any pending tx buffers if running */
377 if (netif_running(dev->net) && val > ctx->tx_max) {
378 netif_tx_lock_bh(dev->net);
379 usbnet_start_xmit(NULL, dev->net);
380 /* make sure tx_curr_skb is reallocated if it was empty */
381 if (ctx->tx_curr_skb) {
382 dev_kfree_skb_any(ctx->tx_curr_skb);
383 ctx->tx_curr_skb = NULL;
384 }
385 ctx->tx_max = val;
386 netif_tx_unlock_bh(dev->net);
387 } else {
388 ctx->tx_max = val;
389 }
390
391 dev->hard_mtu = ctx->tx_max;
392
 393	 /* max qlen depends on hard_mtu and rx_urb_size */
394 usbnet_update_max_qlen(dev);
395
396 /* never pad more than 3 full USB packets per transfer */
397 ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
398 CDC_NCM_MIN_TX_PKT, ctx->tx_max);
399}
400
401/* helpers for NCM and MBIM differences */
402static u8 cdc_ncm_flags(struct usbnet *dev)
403{
404 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
405
406 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
407 return ctx->mbim_desc->bmNetworkCapabilities;
408 if (ctx->func_desc)
409 return ctx->func_desc->bmNetworkCapabilities;
410 return 0;
411}
412
413static int cdc_ncm_eth_hlen(struct usbnet *dev)
414{
415 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
416 return 0;
417 return ETH_HLEN;
418}
419
420static u32 cdc_ncm_min_dgram_size(struct usbnet *dev)
421{
422 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
423 return CDC_MBIM_MIN_DATAGRAM_SIZE;
424 return CDC_NCM_MIN_DATAGRAM_SIZE;
425}
426
427static u32 cdc_ncm_max_dgram_size(struct usbnet *dev)
428{
429 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
430
431 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
432 return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
433 if (ctx->ether_desc)
434 return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
435 return CDC_NCM_MAX_DATAGRAM_SIZE;
436}
437
438/* initial one-time device setup. MUST be called with the data interface
439 * in altsetting 0
440 */
441static int cdc_ncm_init(struct usbnet *dev)
442{
443 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
444 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
445 int err;
81 446
82 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, 447 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
83 USB_TYPE_CLASS | USB_DIR_IN 448 USB_TYPE_CLASS | USB_DIR_IN
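
Each NCM_PARM_ATTR() invocation in the hunk above generates one read-only sysfs attribute; expanded by hand for dwNtbInMaxSize it is simply:

	static ssize_t cdc_ncm_show_dwNtbInMaxSize(struct device *d,
						   struct device_attribute *attr,
						   char *buf)
	{
		struct usbnet *dev = netdev_priv(to_net_dev(d));
		struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];

		return sprintf(buf, "%u\n",
			       le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
	}
	static DEVICE_ATTR(dwNtbInMaxSize, S_IRUGO,
			   cdc_ncm_show_dwNtbInMaxSize, NULL);

Because the attribute group sets .name = "cdc_ncm", all of these land in a cdc_ncm/ subdirectory wherever the group is registered on the netdev.
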
@@ -89,7 +454,36 @@ static int cdc_ncm_setup(struct usbnet *dev)
89 return err; /* GET_NTB_PARAMETERS is required */ 454 return err; /* GET_NTB_PARAMETERS is required */
90 } 455 }
91 456
92 /* read correct set of parameters according to device mode */ 457 /* set CRC Mode */
458 if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) {
459 dev_dbg(&dev->intf->dev, "Setting CRC mode off\n");
460 err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
461 USB_TYPE_CLASS | USB_DIR_OUT
462 | USB_RECIP_INTERFACE,
463 USB_CDC_NCM_CRC_NOT_APPENDED,
464 iface_no, NULL, 0);
465 if (err < 0)
466 dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
467 }
468
469 /* set NTB format, if both formats are supported.
470 *
471 * "The host shall only send this command while the NCM Data
472 * Interface is in alternate setting 0."
473 */
474 if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
475 USB_CDC_NCM_NTB32_SUPPORTED) {
476 dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
477 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
478 USB_TYPE_CLASS | USB_DIR_OUT
479 | USB_RECIP_INTERFACE,
480 USB_CDC_NCM_NTB16_FORMAT,
481 iface_no, NULL, 0);
482 if (err < 0)
483 dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
484 }
485
486 /* set initial device values */
93 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); 487 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
94 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); 488 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
95 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); 489 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
@@ -97,72 +491,79 @@ static int cdc_ncm_setup(struct usbnet *dev)
97 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); 491 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
98 /* devices prior to NCM Errata shall set this field to zero */ 492 /* devices prior to NCM Errata shall set this field to zero */
99 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); 493 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
100 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
101
102 /* there are some minor differences in NCM and MBIM defaults */
103 if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
104 if (!ctx->mbim_desc)
105 return -EINVAL;
106 eth_hlen = 0;
107 flags = ctx->mbim_desc->bmNetworkCapabilities;
108 ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
109 if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
110 ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
111 } else {
112 if (!ctx->func_desc)
113 return -EINVAL;
114 eth_hlen = ETH_HLEN;
115 flags = ctx->func_desc->bmNetworkCapabilities;
116 ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
117 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
118 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
119 }
120
121 /* common absolute max for NCM and MBIM */
122 if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
123 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
124 494
125 dev_dbg(&dev->intf->dev, 495 dev_dbg(&dev->intf->dev,
126 "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", 496 "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
127 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, 497 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
128 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); 498 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
129 499
130 /* max count of tx datagrams */ 500 /* max count of tx datagrams */
131 if ((ctx->tx_max_datagrams == 0) || 501 if ((ctx->tx_max_datagrams == 0) ||
132 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) 502 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
133 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; 503 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
134 504
135 /* verify maximum size of received NTB in bytes */ 505 /* set up maximum NDP size */
136 if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { 506 ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
137 dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
138 USB_CDC_NCM_NTB_MIN_IN_SIZE);
139 ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
140 }
141 507
142 if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { 508 /* initial coalescing timer interval */
143 dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n", 509 ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
144 CDC_NCM_NTB_MAX_SIZE_RX);
145 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
146 }
147 510
148 /* inform device about NTB input size changes */ 511 return 0;
149 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { 512}
150 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
151 513
152 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, 514/* set a new max datagram size */
153 USB_TYPE_CLASS | USB_DIR_OUT 515static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
154 | USB_RECIP_INTERFACE, 516{
155 0, iface_no, &dwNtbInMaxSize, 4); 517 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
156 if (err < 0) 518 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
157 dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n"); 519 __le16 max_datagram_size;
520 u16 mbim_mtu;
521 int err;
522
523 /* set default based on descriptors */
524 ctx->max_datagram_size = clamp_t(u32, new_size,
525 cdc_ncm_min_dgram_size(dev),
526 CDC_NCM_MAX_DATAGRAM_SIZE);
527
528 /* inform the device about the selected Max Datagram Size? */
529 if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
530 goto out;
531
532 /* read current mtu value from device */
533 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
534 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
535 0, iface_no, &max_datagram_size, 2);
536 if (err < 0) {
537 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
538 goto out;
158 } 539 }
159 540
160 /* verify maximum size of transmitted NTB in bytes */ 541 if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
161 if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) { 542 goto out;
162 dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", 543
163 CDC_NCM_NTB_MAX_SIZE_TX); 544 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
164 ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; 545 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
546 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
547 0, iface_no, &max_datagram_size, 2);
548 if (err < 0)
549 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
550
551out:
552 /* set MTU to max supported by the device if necessary */
553 dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
554
 555 /* do not exceed operator preferred MTU */
556 if (ctx->mbim_extended_desc) {
557 mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
558 if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
559 dev->net->mtu = mbim_mtu;
165 } 560 }
561}
562
563static void cdc_ncm_fix_modulus(struct usbnet *dev)
564{
565 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
566 u32 val;
166 567
167 /* 568 /*
168 * verify that the structure alignment is: 569 * verify that the structure alignment is:
@@ -199,68 +600,31 @@ static int cdc_ncm_setup(struct usbnet *dev)
199 } 600 }
200 601
201 /* adjust TX-remainder according to NCM specification. */ 602 /* adjust TX-remainder according to NCM specification. */
202 ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) & 603 ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) &
203 (ctx->tx_modulus - 1)); 604 (ctx->tx_modulus - 1));
605}
204 606
205 /* additional configuration */ 607static int cdc_ncm_setup(struct usbnet *dev)
206 608{
207 /* set CRC Mode */ 609 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
208 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { 610 u32 def_rx, def_tx;
209 err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
210 USB_TYPE_CLASS | USB_DIR_OUT
211 | USB_RECIP_INTERFACE,
212 USB_CDC_NCM_CRC_NOT_APPENDED,
213 iface_no, NULL, 0);
214 if (err < 0)
215 dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
216 }
217
218 /* set NTB format, if both formats are supported */
219 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
220 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
221 USB_TYPE_CLASS | USB_DIR_OUT
222 | USB_RECIP_INTERFACE,
223 USB_CDC_NCM_NTB16_FORMAT,
224 iface_no, NULL, 0);
225 if (err < 0)
226 dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
227 }
228
229 /* inform the device about the selected Max Datagram Size */
230 if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
231 goto out;
232
233 /* read current mtu value from device */
234 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
235 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
236 0, iface_no, &max_datagram_size, 2);
237 if (err < 0) {
238 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
239 goto out;
240 }
241
242 if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
243 goto out;
244 611
245 max_datagram_size = cpu_to_le16(ctx->max_datagram_size); 612 /* be conservative when selecting initial buffer size to
246 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, 613 * increase the number of hosts this will work for
247 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, 614 */
248 0, iface_no, &max_datagram_size, 2); 615 def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
249 if (err < 0) 616 le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
250 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); 617 def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX,
618 le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
251 619
252out: 620 /* clamp rx_max and tx_max and inform device */
253 /* set MTU to max supported by the device if necessary */ 621 cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
254 if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
255 dev->net->mtu = ctx->max_datagram_size - eth_hlen;
256 622
257 /* do not exceed operator preferred MTU */ 623 /* sanitize the modulus and remainder values */
258 if (ctx->mbim_extended_desc) { 624 cdc_ncm_fix_modulus(dev);
259 mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
260 if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
261 dev->net->mtu = mbim_mtu;
262 }
263 625
626 /* set max datagram size */
627 cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev));
264 return 0; 628 return 0;
265} 629}
266 630
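
The rework above splits the old monolithic cdc_ncm_setup() into helpers, and its sizing policy reduces to two clamps: take the smaller of the driver default and the device-advertised NTB size, then bound the datagram size between the per-mode minimum and the shared maximum. A minimal standalone sketch of that policy; the CDC_NCM_* values here are illustrative assumptions, the real ones live in <linux/usb/cdc_ncm.h>:

	#include <stdint.h>

	#define CDC_NCM_NTB_DEF_SIZE_RX   16384u  /* assumed driver default */
	#define CDC_NCM_MIN_DATAGRAM_SIZE 1514u   /* assumed NCM minimum */
	#define CDC_NCM_MAX_DATAGRAM_SIZE 8192u   /* assumed common maximum */

	/* conservative default: never ask for more than the device advertises */
	static uint32_t pick_ntb_size(uint32_t def, uint32_t advertised)
	{
		return def < advertised ? def : advertised;
	}

	/* bound a requested datagram size, mirroring the clamp_t() call above */
	static uint32_t clamp_dgram(uint32_t req)
	{
		if (req < CDC_NCM_MIN_DATAGRAM_SIZE)
			return CDC_NCM_MIN_DATAGRAM_SIZE;
		if (req > CDC_NCM_MAX_DATAGRAM_SIZE)
			return CDC_NCM_MAX_DATAGRAM_SIZE;
		return req;
	}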
@@ -424,10 +788,21 @@ advance:
424 } 788 }
425 789
426 /* check if we got everything */ 790 /* check if we got everything */
427 if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) { 791 if (!ctx->data) {
428 dev_dbg(&intf->dev, "CDC descriptors missing\n"); 792 dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n");
429 goto error; 793 goto error;
430 } 794 }
795 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
796 if (!ctx->mbim_desc) {
797 dev_dbg(&intf->dev, "MBIM functional descriptor missing\n");
798 goto error;
799 }
800 } else {
801 if (!ctx->ether_desc || !ctx->func_desc) {
802 dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n");
803 goto error;
804 }
805 }
431 806
432 /* claim data interface, if different from control */ 807 /* claim data interface, if different from control */
433 if (ctx->data != ctx->control) { 808 if (ctx->data != ctx->control) {
@@ -447,8 +822,8 @@ advance:
447 goto error2; 822 goto error2;
448 } 823 }
449 824
450 /* initialize data interface */ 825 /* initialize basic device settings */
451 if (cdc_ncm_setup(dev)) 826 if (cdc_ncm_init(dev))
452 goto error2; 827 goto error2;
453 828
454 /* configure data interface */ 829 /* configure data interface */
@@ -477,18 +852,14 @@ advance:
477 dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr); 852 dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
478 } 853 }
479 854
480 /* usbnet use these values for sizing tx/rx queues */ 855 /* finish setting up the device specific data */
481 dev->hard_mtu = ctx->tx_max; 856 cdc_ncm_setup(dev);
482 dev->rx_urb_size = ctx->rx_max;
483 857
484 /* cdc_ncm_setup will override dwNtbOutMaxSize if it is 858 /* override ethtool_ops */
485 * outside the sane range. Adding a pad byte here if necessary 859 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
486 * simplifies the handling in cdc_ncm_fill_tx_frame, making 860
487 * tx_max always represent the real skb max size. 861 /* add our sysfs attrs */
488 */ 862 dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
489 if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
490 ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
491 ctx->tx_max++;
492 863
493 return 0; 864 return 0;
494 865
@@ -541,10 +912,10 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
541} 912}
542EXPORT_SYMBOL_GPL(cdc_ncm_unbind); 913EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
543 914
544/* Select the MBIM altsetting iff it is preferred and available, 915/* Return the number of the MBIM control interface altsetting iff it
545 * returning the number of the corresponding data interface altsetting 916 * is preferred and available.
546 */ 917 */
547u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) 918u8 cdc_ncm_select_altsetting(struct usb_interface *intf)
548{ 919{
549 struct usb_host_interface *alt; 920 struct usb_host_interface *alt;
550 921
@@ -563,15 +934,15 @@ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
563 * the rules given in section 6 (USB Device Model) of this 934 * the rules given in section 6 (USB Device Model) of this
564 * specification." 935 * specification."
565 */ 936 */
566 if (prefer_mbim && intf->num_altsetting == 2) { 937 if (intf->num_altsetting < 2)
938 return intf->cur_altsetting->desc.bAlternateSetting;
939
940 if (prefer_mbim) {
567 alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); 941 alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
568 if (alt && cdc_ncm_comm_intf_is_mbim(alt) && 942 if (alt && cdc_ncm_comm_intf_is_mbim(alt))
569 !usb_set_interface(dev->udev, 943 return CDC_NCM_COMM_ALTSETTING_MBIM;
570 intf->cur_altsetting->desc.bInterfaceNumber,
571 CDC_NCM_COMM_ALTSETTING_MBIM))
572 return CDC_NCM_DATA_ALTSETTING_MBIM;
573 } 944 }
574 return CDC_NCM_DATA_ALTSETTING_NCM; 945 return CDC_NCM_COMM_ALTSETTING_NCM;
575} 946}
576EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); 947EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
577 948
@@ -580,12 +951,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
580 int ret; 951 int ret;
581 952
582 /* MBIM backwards compatible function? */ 953 /* MBIM backwards compatible function? */
583 cdc_ncm_select_altsetting(dev, intf); 954 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
584 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
585 return -ENODEV; 955 return -ENODEV;
586 956
587 /* NCM data altsetting is always 1 */ 957 /* The NCM data altsetting is fixed */
588 ret = cdc_ncm_bind_common(dev, intf, 1); 958 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
589 959
590 /* 960 /*
591 * We should get an event when network connection is "connected" or 961 * We should get an event when network connection is "connected" or
@@ -628,7 +998,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
628 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); 998 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
629 999
630 /* verify that there is room for the NDP and the datagram (reserve) */ 1000 /* verify that there is room for the NDP and the datagram (reserve) */
631 if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE) 1001 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
632 return NULL; 1002 return NULL;
633 1003
634 /* link to it */ 1004 /* link to it */
@@ -638,7 +1008,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
638 nth16->wNdpIndex = cpu_to_le16(skb->len); 1008 nth16->wNdpIndex = cpu_to_le16(skb->len);
639 1009
640 /* push a new empty NDP */ 1010 /* push a new empty NDP */
641 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE); 1011 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
642 ndp16->dwSignature = sign; 1012 ndp16->dwSignature = sign;
643 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); 1013 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
644 return ndp16; 1014 return ndp16;
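
The fixed CDC_NCM_NDP_SIZE reservation is replaced by ctx->max_ndp_size, which scales with the negotiated datagram count; the +1 accounts for the terminating null entry that NCM requires at the end of every NDP. A sketch of the arithmetic, with shortened stand-in structs (the real ones are struct usb_cdc_ncm_ndp16/dpe16):

	#include <stddef.h>
	#include <stdint.h>

	/* stand-ins for the NCM 1.0 wire structs, field lists abbreviated */
	struct ndp16 { uint32_t dwSignature; uint16_t wLength, wNextNdpIndex; };
	struct dpe16 { uint16_t wDatagramIndex, wDatagramLength; };

	/* one entry per datagram plus the mandatory terminating null entry */
	static size_t max_ndp_size(unsigned int tx_max_datagrams)
	{
		return sizeof(struct ndp16) +
		       (tx_max_datagrams + 1) * sizeof(struct dpe16);
	}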
@@ -683,6 +1053,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
683 1053
684 /* count total number of frames in this NTB */ 1054 /* count total number of frames in this NTB */
685 ctx->tx_curr_frame_num = 0; 1055 ctx->tx_curr_frame_num = 0;
1056
 1057 /* reset the payload counter for this skb_out */
1058 ctx->tx_curr_frame_payload = 0;
686 } 1059 }
687 1060
688 for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) { 1061 for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
@@ -720,6 +1093,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
720 ctx->tx_rem_sign = sign; 1093 ctx->tx_rem_sign = sign;
721 skb = NULL; 1094 skb = NULL;
722 ready2send = 1; 1095 ready2send = 1;
1096 ctx->tx_reason_ntb_full++; /* count reason for transmitting */
723 } 1097 }
724 break; 1098 break;
725 } 1099 }
@@ -733,12 +1107,14 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
733 ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len); 1107 ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
734 ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16)); 1108 ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
735 memcpy(skb_put(skb_out, skb->len), skb->data, skb->len); 1109 memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
1110 ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
736 dev_kfree_skb_any(skb); 1111 dev_kfree_skb_any(skb);
737 skb = NULL; 1112 skb = NULL;
738 1113
739 /* send now if this NDP is full */ 1114 /* send now if this NDP is full */
740 if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) { 1115 if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
741 ready2send = 1; 1116 ready2send = 1;
1117 ctx->tx_reason_ndp_full++; /* count reason for transmitting */
742 break; 1118 break;
743 } 1119 }
744 } 1120 }
@@ -758,7 +1134,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
758 ctx->tx_curr_skb = skb_out; 1134 ctx->tx_curr_skb = skb_out;
759 goto exit_no_skb; 1135 goto exit_no_skb;
760 1136
761 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { 1137 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) {
762 /* wait for more frames */ 1138 /* wait for more frames */
763 /* push variables */ 1139 /* push variables */
764 ctx->tx_curr_skb = skb_out; 1140 ctx->tx_curr_skb = skb_out;
@@ -768,11 +1144,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
768 goto exit_no_skb; 1144 goto exit_no_skb;
769 1145
770 } else { 1146 } else {
1147 if (n == ctx->tx_max_datagrams)
1148 ctx->tx_reason_max_datagram++; /* count reason for transmitting */
771 /* frame goes out */ 1149 /* frame goes out */
772 /* variables will be reset at next call */ 1150 /* variables will be reset at next call */
773 } 1151 }
774 1152
775 /* If collected data size is less than or equal to CDC_NCM_MIN_TX_PKT 1153 /* If collected data size is less than or equal to ctx->min_tx_pkt
776 * bytes, we send the buffer as it is. If we get more data, it 1154 * bytes, we send the buffer as it is. If we get more data, it
777 * would be more efficient for a USB HS mobile device with a DMA 1155 * would be more efficient for a USB HS mobile device with a DMA
778 * engine to receive a full-size NTB than canceling DMA 1156 * engine to receive a full-size NTB than canceling DMA
@@ -782,7 +1160,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
782 * a ZLP after full sized NTBs. 1160 * a ZLP after full sized NTBs.
783 */ 1161 */
784 if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && 1162 if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
785 skb_out->len > CDC_NCM_MIN_TX_PKT) 1163 skb_out->len > ctx->min_tx_pkt)
786 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, 1164 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
787 ctx->tx_max - skb_out->len); 1165 ctx->tx_max - skb_out->len);
788 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) 1166 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
@@ -795,11 +1173,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
795 /* return skb */ 1173 /* return skb */
796 ctx->tx_curr_skb = NULL; 1174 ctx->tx_curr_skb = NULL;
797 dev->net->stats.tx_packets += ctx->tx_curr_frame_num; 1175 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
1176
1177 /* keep private stats: framing overhead and number of NTBs */
1178 ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
1179 ctx->tx_ntbs++;
1180
1181 /* usbnet has already counted all the framing overhead.
 1182 * Adjust the stats so that the tx_bytes counter shows real
1183 * payload data instead.
1184 */
1185 dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
1186
798 return skb_out; 1187 return skb_out;
799 1188
800exit_no_skb: 1189exit_no_skb:
801 /* Start timer, if there is a remaining skb */ 1190 /* Start timer, if there is a remaining non-empty skb */
802 if (ctx->tx_curr_skb != NULL) 1191 if (ctx->tx_curr_skb != NULL && n > 0)
803 cdc_ncm_tx_timeout_start(ctx); 1192 cdc_ncm_tx_timeout_start(ctx);
804 return NULL; 1193 return NULL;
805} 1194}
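
usbnet credits the length of the finished skb_out, framing and padding included, to tx_bytes; the adjustment above rewinds that so the public counter reports payload only, while the framing cost lands in the new private tx_overhead/tx_ntbs counters. The accounting in isolation, with hypothetical counter structs standing in for the real ones:

	/* hypothetical stand-ins for the netdev and driver counters */
	struct net_stats { unsigned long long tx_bytes; };
	struct ncm_priv  { unsigned long long tx_overhead, tx_ntbs; };

	static void account_ntb(struct net_stats *net, struct ncm_priv *priv,
				unsigned int ntb_len, unsigned int payload_len)
	{
		unsigned int overhead = ntb_len - payload_len;

		net->tx_bytes -= overhead;	/* usbnet counted the full NTB */
		priv->tx_overhead += overhead;
		priv->tx_ntbs++;
	}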
@@ -810,7 +1199,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
810 /* start timer, if not already started */ 1199 /* start timer, if not already started */
811 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop))) 1200 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
812 hrtimer_start(&ctx->tx_timer, 1201 hrtimer_start(&ctx->tx_timer,
813 ktime_set(0, CDC_NCM_TIMER_INTERVAL), 1202 ktime_set(0, ctx->timer_interval),
814 HRTIMER_MODE_REL); 1203 HRTIMER_MODE_REL);
815} 1204}
816 1205
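
The coalescing timeout is no longer the compile-time CDC_NCM_TIMER_INTERVAL but a per-device ctx->timer_interval, stored in nanoseconds because it feeds ktime_set() directly; a zero value disables the wait-for-more-frames path entirely (see the ctx->timer_interval > 0 test in cdc_ncm_fill_tx_frame() above). The initialization, assuming a 400 us default:

	#define CDC_NCM_TIMER_INTERVAL_USEC 400UL	/* assumed default */
	#define NSEC_PER_USEC               1000UL

	/* nanoseconds, handed straight to ktime_set(0, ctx->timer_interval) */
	unsigned long timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;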
@@ -835,6 +1224,7 @@ static void cdc_ncm_txpath_bh(unsigned long param)
835 cdc_ncm_tx_timeout_start(ctx); 1224 cdc_ncm_tx_timeout_start(ctx);
836 spin_unlock_bh(&ctx->mtx); 1225 spin_unlock_bh(&ctx->mtx);
837 } else if (dev->net != NULL) { 1226 } else if (dev->net != NULL) {
1227 ctx->tx_reason_timeout++; /* count reason for transmitting */
838 spin_unlock_bh(&ctx->mtx); 1228 spin_unlock_bh(&ctx->mtx);
839 netif_tx_lock_bh(dev->net); 1229 netif_tx_lock_bh(dev->net);
840 usbnet_start_xmit(NULL, dev->net); 1230 usbnet_start_xmit(NULL, dev->net);
@@ -970,6 +1360,7 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
970 struct usb_cdc_ncm_dpe16 *dpe16; 1360 struct usb_cdc_ncm_dpe16 *dpe16;
971 int ndpoffset; 1361 int ndpoffset;
972 int loopcount = 50; /* arbitrary max preventing infinite loop */ 1362 int loopcount = 50; /* arbitrary max preventing infinite loop */
1363 u32 payload = 0;
973 1364
974 ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in); 1365 ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
975 if (ndpoffset < 0) 1366 if (ndpoffset < 0)
@@ -1015,13 +1406,13 @@ next_ndp:
1015 break; 1406 break;
1016 1407
1017 } else { 1408 } else {
1018 skb = skb_clone(skb_in, GFP_ATOMIC); 1409 /* create a fresh copy to reduce truesize */
1410 skb = netdev_alloc_skb_ip_align(dev->net, len);
1019 if (!skb) 1411 if (!skb)
1020 goto error; 1412 goto error;
1021 skb->len = len; 1413 memcpy(skb_put(skb, len), skb_in->data + offset, len);
1022 skb->data = ((u8 *)skb_in->data) + offset;
1023 skb_set_tail_pointer(skb, len);
1024 usbnet_skb_return(dev, skb); 1414 usbnet_skb_return(dev, skb);
1415 payload += len; /* count payload bytes in this NTB */
1025 } 1416 }
1026 } 1417 }
1027err_ndp: 1418err_ndp:
@@ -1030,6 +1421,10 @@ err_ndp:
1030 if (ndpoffset && loopcount--) 1421 if (ndpoffset && loopcount--)
1031 goto next_ndp; 1422 goto next_ndp;
1032 1423
1424 /* update stats */
1425 ctx->rx_overhead += skb_in->len - payload;
1426 ctx->rx_ntbs++;
1427
1033 return 1; 1428 return 1;
1034error: 1429error:
1035 return 0; 1430 return 0;
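
Two rx changes land here. Cloning skb_in kept a reference to the whole NTB, so every datagram, however small, carried the truesize of the full URB buffer and inflated socket memory accounting; copying into a freshly allocated skb keeps truesize proportional to the payload. The payload bytes are also tallied so the NTB framing overhead can be recorded per buffer. A kernel-context sketch of the copy path, error handling trimmed:

	/* sketch only: copy one datagram out of the NTB and deliver it */
	static void deliver_datagram(struct usbnet *dev, struct sk_buff *skb_in,
				     unsigned int offset, unsigned int len,
				     u32 *payload)
	{
		struct sk_buff *skb = netdev_alloc_skb_ip_align(dev->net, len);

		if (!skb)
			return;	/* caller treats this as an rx error */
		memcpy(skb_put(skb, len), skb_in->data + offset, len);
		usbnet_skb_return(dev, skb);
		*payload += len;	/* rx_overhead = skb_in->len - payload */
	}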
@@ -1049,14 +1444,14 @@ cdc_ncm_speed_change(struct usbnet *dev,
1049 */ 1444 */
1050 if ((tx_speed > 1000000) && (rx_speed > 1000000)) { 1445 if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
1051 netif_info(dev, link, dev->net, 1446 netif_info(dev, link, dev->net,
1052 "%u mbit/s downlink %u mbit/s uplink\n", 1447 "%u mbit/s downlink %u mbit/s uplink\n",
1053 (unsigned int)(rx_speed / 1000000U), 1448 (unsigned int)(rx_speed / 1000000U),
1054 (unsigned int)(tx_speed / 1000000U)); 1449 (unsigned int)(tx_speed / 1000000U));
1055 } else { 1450 } else {
1056 netif_info(dev, link, dev->net, 1451 netif_info(dev, link, dev->net,
1057 "%u kbit/s downlink %u kbit/s uplink\n", 1452 "%u kbit/s downlink %u kbit/s uplink\n",
1058 (unsigned int)(rx_speed / 1000U), 1453 (unsigned int)(rx_speed / 1000U),
1059 (unsigned int)(tx_speed / 1000U)); 1454 (unsigned int)(tx_speed / 1000U));
1060 } 1455 }
1061} 1456}
1062 1457
@@ -1086,11 +1481,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1086 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be 1481 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
1087 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. 1482 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
1088 */ 1483 */
1089 ctx->connected = le16_to_cpu(event->wValue);
1090 netif_info(dev, link, dev->net, 1484 netif_info(dev, link, dev->net,
1091 "network connection: %sconnected\n", 1485 "network connection: %sconnected\n",
1092 ctx->connected ? "" : "dis"); 1486 !!event->wValue ? "" : "dis");
1093 usbnet_link_change(dev, ctx->connected, 0); 1487 usbnet_link_change(dev, !!event->wValue, 0);
1094 break; 1488 break;
1095 1489
1096 case USB_CDC_NOTIFY_SPEED_CHANGE: 1490 case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1110,23 +1504,11 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1110 } 1504 }
1111} 1505}
1112 1506
1113static int cdc_ncm_check_connect(struct usbnet *dev)
1114{
1115 struct cdc_ncm_ctx *ctx;
1116
1117 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1118 if (ctx == NULL)
1119 return 1; /* disconnected */
1120
1121 return !ctx->connected;
1122}
1123
1124static const struct driver_info cdc_ncm_info = { 1507static const struct driver_info cdc_ncm_info = {
1125 .description = "CDC NCM", 1508 .description = "CDC NCM",
1126 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, 1509 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
1127 .bind = cdc_ncm_bind, 1510 .bind = cdc_ncm_bind,
1128 .unbind = cdc_ncm_unbind, 1511 .unbind = cdc_ncm_unbind,
1129 .check_connect = cdc_ncm_check_connect,
1130 .manage_power = usbnet_manage_power, 1512 .manage_power = usbnet_manage_power,
1131 .status = cdc_ncm_status, 1513 .status = cdc_ncm_status,
1132 .rx_fixup = cdc_ncm_rx_fixup, 1514 .rx_fixup = cdc_ncm_rx_fixup,
@@ -1140,7 +1522,6 @@ static const struct driver_info wwan_info = {
1140 | FLAG_WWAN, 1522 | FLAG_WWAN,
1141 .bind = cdc_ncm_bind, 1523 .bind = cdc_ncm_bind,
1142 .unbind = cdc_ncm_unbind, 1524 .unbind = cdc_ncm_unbind,
1143 .check_connect = cdc_ncm_check_connect,
1144 .manage_power = usbnet_manage_power, 1525 .manage_power = usbnet_manage_power,
1145 .status = cdc_ncm_status, 1526 .status = cdc_ncm_status,
1146 .rx_fixup = cdc_ncm_rx_fixup, 1527 .rx_fixup = cdc_ncm_rx_fixup,
@@ -1154,7 +1535,6 @@ static const struct driver_info wwan_noarp_info = {
1154 | FLAG_WWAN | FLAG_NOARP, 1535 | FLAG_WWAN | FLAG_NOARP,
1155 .bind = cdc_ncm_bind, 1536 .bind = cdc_ncm_bind,
1156 .unbind = cdc_ncm_unbind, 1537 .unbind = cdc_ncm_unbind,
1157 .check_connect = cdc_ncm_check_connect,
1158 .manage_power = usbnet_manage_power, 1538 .manage_power = usbnet_manage_power,
1159 .status = cdc_ncm_status, 1539 .status = cdc_ncm_status,
1160 .rx_fixup = cdc_ncm_rx_fixup, 1540 .rx_fixup = cdc_ncm_rx_fixup,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 660bd5ea9fc0..a3a05869309d 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2425,7 +2425,7 @@ static void hso_net_init(struct net_device *net)
2425 net->type = ARPHRD_NONE; 2425 net->type = ARPHRD_NONE;
2426 net->mtu = DEFAULT_MTU - 14; 2426 net->mtu = DEFAULT_MTU - 14;
2427 net->tx_queue_len = 10; 2427 net->tx_queue_len = 10;
2428 SET_ETHTOOL_OPS(net, &ops); 2428 net->ethtool_ops = &ops;
2429 2429
2430 /* and initialize the semaphore */ 2430 /* and initialize the semaphore */
2431 spin_lock_init(&hso_net->net_lock); 2431 spin_lock_init(&hso_net->net_lock);
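
This hunk, and the matching ones in huawei_cdc_ncm, ipheth, kaweth, pegasus, r8152, rtl8150, virtio_net, vmxnet3 and vxlan below, are the mechanical fallout of retiring the SET_ETHTOOL_OPS() wrapper. As far as older <linux/netdevice.h> versions go, the macro was nothing more than an assignment, so open-coding it loses nothing:

	/* the retired wrapper, essentially: */
	#define SET_ETHTOOL_OPS(netdev, ops) \
		((netdev)->ethtool_ops = (ops))

	/* every call site therefore becomes a plain assignment: */
	net->ethtool_ops = &ops;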
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 312178d7b698..f9822bc75425 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -172,24 +172,11 @@ err:
172 return ret; 172 return ret;
173} 173}
174 174
175static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
176{
177 struct cdc_ncm_ctx *ctx;
178
179 ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
180
181 if (ctx == NULL)
182 return 1; /* disconnected */
183
184 return !ctx->connected;
185}
186
187static const struct driver_info huawei_cdc_ncm_info = { 175static const struct driver_info huawei_cdc_ncm_info = {
188 .description = "Huawei CDC NCM device", 176 .description = "Huawei CDC NCM device",
189 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, 177 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
190 .bind = huawei_cdc_ncm_bind, 178 .bind = huawei_cdc_ncm_bind,
191 .unbind = huawei_cdc_ncm_unbind, 179 .unbind = huawei_cdc_ncm_unbind,
192 .check_connect = huawei_cdc_ncm_check_connect,
193 .manage_power = huawei_cdc_ncm_manage_power, 180 .manage_power = huawei_cdc_ncm_manage_power,
194 .rx_fixup = cdc_ncm_rx_fixup, 181 .rx_fixup = cdc_ncm_rx_fixup,
195 .tx_fixup = cdc_ncm_tx_fixup, 182 .tx_fixup = cdc_ncm_tx_fixup,
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 973275fef250..76465b117b72 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -534,7 +534,7 @@ static int ipheth_probe(struct usb_interface *intf,
534 usb_set_intfdata(intf, dev); 534 usb_set_intfdata(intf, dev);
535 535
536 SET_NETDEV_DEV(netdev, &intf->dev); 536 SET_NETDEV_DEV(netdev, &intf->dev);
537 SET_ETHTOOL_OPS(netdev, &ops); 537 netdev->ethtool_ops = &ops;
538 538
539 retval = register_netdev(netdev); 539 retval = register_netdev(netdev);
540 if (retval) { 540 if (retval) {
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index a359d3bb7c5b..dcb6d33141e0 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1171,7 +1171,7 @@ err_fw:
1171 netdev->netdev_ops = &kaweth_netdev_ops; 1171 netdev->netdev_ops = &kaweth_netdev_ops;
1172 netdev->watchdog_timeo = KAWETH_TX_TIMEOUT; 1172 netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
1173 netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size); 1173 netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
1174 SET_ETHTOOL_OPS(netdev, &ops); 1174 netdev->ethtool_ops = &ops;
1175 1175
1176 /* kaweth is zeroed as part of alloc_netdev */ 1176 /* kaweth is zeroed as part of alloc_netdev */
1177 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); 1177 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 03e8a15d7deb..f84080215915 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1159,7 +1159,7 @@ static int pegasus_probe(struct usb_interface *intf,
1159 1159
1160 net->watchdog_timeo = PEGASUS_TX_TIMEOUT; 1160 net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
1161 net->netdev_ops = &pegasus_netdev_ops; 1161 net->netdev_ops = &pegasus_netdev_ops;
1162 SET_ETHTOOL_OPS(net, &ops); 1162 net->ethtool_ops = &ops;
1163 pegasus->mii.dev = net; 1163 pegasus->mii.dev = net;
1164 pegasus->mii.mdio_read = mdio_read; 1164 pegasus->mii.mdio_read = mdio_read;
1165 pegasus->mii.mdio_write = mdio_write; 1165 pegasus->mii.mdio_write = mdio_write;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index dc4bf06948c7..cf62d7e8329f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -763,7 +763,12 @@ static const struct usb_device_id products[] = {
763 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 763 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
764 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 764 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
765 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 765 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
766 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 766 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
767 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
768 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
769 {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */
770 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
771 {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
767 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ 772 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
768 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 773 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
769 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 774 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 3fbfb0869030..25431965a625 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -630,12 +630,10 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
630 int ret; 630 int ret;
631 void *tmp; 631 void *tmp;
632 632
633 tmp = kmalloc(size, GFP_KERNEL); 633 tmp = kmemdup(data, size, GFP_KERNEL);
634 if (!tmp) 634 if (!tmp)
635 return -ENOMEM; 635 return -ENOMEM;
636 636
637 memcpy(tmp, data, size);
638
639 ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), 637 ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
640 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, 638 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
641 value, index, tmp, size, 500); 639 value, index, tmp, size, 500);
@@ -3452,7 +3450,7 @@ static int rtl8152_probe(struct usb_interface *intf,
3452 NETIF_F_TSO | NETIF_F_FRAGLIST | 3450 NETIF_F_TSO | NETIF_F_FRAGLIST |
3453 NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 3451 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
3454 3452
3455 SET_ETHTOOL_OPS(netdev, &ops); 3453 netdev->ethtool_ops = &ops;
3456 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); 3454 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
3457 3455
3458 tp->mii.dev = netdev; 3456 tp->mii.dev = netdev;
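
kmemdup() folds the allocate-then-copy pair into one call and fails the same way (NULL on allocation failure), so the error path is unchanged. Modulo allocator bookkeeping, it is equivalent to:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* what kmemdup(src, len, gfp) does, open-coded */
	static void *kmemdup_equiv(const void *src, size_t len, gfp_t gfp)
	{
		void *p = kmalloc(len, gfp);

		if (p)
			memcpy(p, src, len);
		return p;
	}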
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index da2c4583bd2d..6e87e5710048 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -878,7 +878,7 @@ static int rtl8150_probe(struct usb_interface *intf,
878 dev->netdev = netdev; 878 dev->netdev = netdev;
879 netdev->netdev_ops = &rtl8150_netdev_ops; 879 netdev->netdev_ops = &rtl8150_netdev_ops;
880 netdev->watchdog_timeo = RTL8150_TX_TIMEOUT; 880 netdev->watchdog_timeo = RTL8150_TX_TIMEOUT;
881 SET_ETHTOOL_OPS(netdev, &ops); 881 netdev->ethtool_ops = &ops;
882 dev->intr_interval = 100; /* 100ms */ 882 dev->intr_interval = 100; /* 100ms */
883 883
884 if (!alloc_all_urbs(dev)) { 884 if (!alloc_all_urbs(dev)) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8a852b5f215f..7d9f84a91f37 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1646,7 +1646,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1646 dev->netdev_ops = &virtnet_netdev; 1646 dev->netdev_ops = &virtnet_netdev;
1647 dev->features = NETIF_F_HIGHDMA; 1647 dev->features = NETIF_F_HIGHDMA;
1648 1648
1649 SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); 1649 dev->ethtool_ops = &virtnet_ethtool_ops;
1650 SET_NETDEV_DEV(dev, &vdev->dev); 1650 SET_NETDEV_DEV(dev, &vdev->dev);
1651 1651
1652 /* Do we support "hardware" checksums? */ 1652 /* Do we support "hardware" checksums? */
@@ -1724,6 +1724,13 @@ static int virtnet_probe(struct virtio_device *vdev)
1724 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1724 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1725 vi->has_cvq = true; 1725 vi->has_cvq = true;
1726 1726
1727 if (vi->any_header_sg) {
1728 if (vi->mergeable_rx_bufs)
1729 dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1730 else
1731 dev->needed_headroom = sizeof(struct virtio_net_hdr);
1732 }
1733
1727 /* Use single tx/rx queue pair as default */ 1734 /* Use single tx/rx queue pair as default */
1728 vi->curr_queue_pairs = 1; 1735 vi->curr_queue_pairs = 1;
1729 vi->max_queue_pairs = max_queue_pairs; 1736 vi->max_queue_pairs = max_queue_pairs;
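
With any_header_sg the device accepts the virtio-net header as part of the first scatter-gather element, so the driver can prepend it into skb headroom instead of a separate buffer; advertising needed_headroom lets the core allocate that space up front and avoid per-packet reallocation. The two header sizes can be checked from userspace via the UAPI header:

	#include <stdio.h>
	#include <linux/virtio_net.h>	/* UAPI copy of the wire structs */

	int main(void)
	{
		printf("plain hdr: %zu bytes\n", sizeof(struct virtio_net_hdr));
		printf("mergeable hdr: %zu bytes\n",
		       sizeof(struct virtio_net_hdr_mrg_rxbuf));
		return 0;
	}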
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 600ab56c0008..40c1c7b0d9e0 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -431,8 +431,8 @@ vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
431 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 431 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
432 ecmd->duplex = DUPLEX_FULL; 432 ecmd->duplex = DUPLEX_FULL;
433 } else { 433 } else {
434 ethtool_cmd_speed_set(ecmd, -1); 434 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
435 ecmd->duplex = -1; 435 ecmd->duplex = DUPLEX_UNKNOWN;
436 } 436 }
437 return 0; 437 return 0;
438} 438}
@@ -579,7 +579,7 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
579} 579}
580 580
581static int 581static int
582vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) 582vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
583{ 583{
584 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 584 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
585 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 585 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
@@ -592,7 +592,7 @@ vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
592} 592}
593 593
594static int 594static int
595vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) 595vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key)
596{ 596{
597 unsigned int i; 597 unsigned int i;
598 unsigned long flags; 598 unsigned long flags;
@@ -628,12 +628,12 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
628 .get_rxnfc = vmxnet3_get_rxnfc, 628 .get_rxnfc = vmxnet3_get_rxnfc,
629#ifdef VMXNET3_RSS 629#ifdef VMXNET3_RSS
630 .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, 630 .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
631 .get_rxfh_indir = vmxnet3_get_rss_indir, 631 .get_rxfh = vmxnet3_get_rss,
632 .set_rxfh_indir = vmxnet3_set_rss_indir, 632 .set_rxfh = vmxnet3_set_rss,
633#endif 633#endif
634}; 634};
635 635
636void vmxnet3_set_ethtool_ops(struct net_device *netdev) 636void vmxnet3_set_ethtool_ops(struct net_device *netdev)
637{ 637{
638 SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops); 638 netdev->ethtool_ops = &vmxnet3_ethtool_ops;
639} 639}
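
The rename tracks an ethtool API change from this cycle: the old get/set_rxfh_indir pair carried only the RSS indirection table, while the replacement get/set_rxfh also passes the hash key. vmxnet3 only implements the table, so it simply ignores the key argument. The member signatures, as assumed for this merge window (the hash-function argument arrived later):

	/* struct ethtool_ops members, assumed shape for this kernel */
	int (*get_rxfh)(struct net_device *dev, u32 *indir, u8 *key);
	int (*set_rxfh)(struct net_device *dev, const u32 *indir,
			const u8 *key);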
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 4dbb2ed85b97..1610d51dbb5c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -127,6 +127,7 @@ struct vxlan_dev {
127 struct list_head next; /* vxlan's per namespace list */ 127 struct list_head next; /* vxlan's per namespace list */
128 struct vxlan_sock *vn_sock; /* listening socket */ 128 struct vxlan_sock *vn_sock; /* listening socket */
129 struct net_device *dev; 129 struct net_device *dev;
130 struct net *net; /* netns for packet i/o */
130 struct vxlan_rdst default_dst; /* default destination */ 131 struct vxlan_rdst default_dst; /* default destination */
131 union vxlan_addr saddr; /* source address */ 132 union vxlan_addr saddr; /* source address */
132 __be16 dst_port; 133 __be16 dst_port;
@@ -134,7 +135,7 @@ struct vxlan_dev {
134 __u16 port_max; 135 __u16 port_max;
135 __u8 tos; /* TOS override */ 136 __u8 tos; /* TOS override */
136 __u8 ttl; 137 __u8 ttl;
137 u32 flags; /* VXLAN_F_* below */ 138 u32 flags; /* VXLAN_F_* in vxlan.h */
138 139
139 struct work_struct sock_work; 140 struct work_struct sock_work;
140 struct work_struct igmp_join; 141 struct work_struct igmp_join;
@@ -149,13 +150,6 @@ struct vxlan_dev {
149 struct hlist_head fdb_head[FDB_HASH_SIZE]; 150 struct hlist_head fdb_head[FDB_HASH_SIZE];
150}; 151};
151 152
152#define VXLAN_F_LEARN 0x01
153#define VXLAN_F_PROXY 0x02
154#define VXLAN_F_RSC 0x04
155#define VXLAN_F_L2MISS 0x08
156#define VXLAN_F_L3MISS 0x10
157#define VXLAN_F_IPV6 0x20 /* internal flag */
158
159/* salt for hash table */ 153/* salt for hash table */
160static u32 vxlan_salt __read_mostly; 154static u32 vxlan_salt __read_mostly;
161static struct workqueue_struct *vxlan_wq; 155static struct workqueue_struct *vxlan_wq;
@@ -571,6 +565,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
571 goto out; 565 goto out;
572 } 566 }
573 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ 567 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
568 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
574 569
575 off_eth = skb_gro_offset(skb); 570 off_eth = skb_gro_offset(skb);
576 hlen = off_eth + sizeof(*eh); 571 hlen = off_eth + sizeof(*eh);
@@ -605,6 +600,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
605 } 600 }
606 601
607 skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */ 602 skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
603 skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
608 pp = ptype->callbacks.gro_receive(head, skb); 604 pp = ptype->callbacks.gro_receive(head, skb);
609 605
610out_unlock: 606out_unlock:
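
With CHECKSUM_COMPLETE the saved checksum covers every byte the NIC delivered; once GRO pulls the VXLAN and inner Ethernet headers out of the data area, their contribution has to be subtracted or later verification sees a mismatch. Conceptually the helper does the following (a sketch, not the exact in-tree body):

	/* conceptual shape of skb_gro_postpull_rcsum() */
	static void postpull_rcsum(struct sk_buff *skb, const void *start,
				   unsigned int len)
	{
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_sub(skb->csum,
					     csum_partial(start, len, 0));
	}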
@@ -1203,6 +1199,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
1203 1199
1204 remote_ip = &vxlan->default_dst.remote_ip; 1200 remote_ip = &vxlan->default_dst.remote_ip;
1205 skb_reset_mac_header(skb); 1201 skb_reset_mac_header(skb);
1202 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1206 skb->protocol = eth_type_trans(skb, vxlan->dev); 1203 skb->protocol = eth_type_trans(skb, vxlan->dev);
1207 1204
1208 /* Ignore packet loops (and multicast echo) */ 1205 /* Ignore packet loops (and multicast echo) */
@@ -1599,18 +1596,11 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
1599} 1596}
1600EXPORT_SYMBOL_GPL(vxlan_src_port); 1597EXPORT_SYMBOL_GPL(vxlan_src_port);
1601 1598
1602static int handle_offloads(struct sk_buff *skb) 1599static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
1600 bool udp_csum)
1603{ 1601{
1604 if (skb_is_gso(skb)) { 1602 int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1605 int err = skb_unclone(skb, GFP_ATOMIC); 1603 return iptunnel_handle_offloads(skb, udp_csum, type);
1606 if (unlikely(err))
1607 return err;
1608
1609 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1610 } else if (skb->ip_summed != CHECKSUM_PARTIAL)
1611 skb->ip_summed = CHECKSUM_NONE;
1612
1613 return 0;
1614} 1604}
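
The removed handle_offloads() is subsumed by the shared iptunnel helper, which also takes over the inner-header bookkeeping that vxlan_xmit_skb()/vxlan6_xmit_skb() used to open-code; the only vxlan-specific decision left is which GSO tunnel type to request. A rough reconstruction of the helper's shape, checksum-help branch elided (see net/ipv4/ip_tunnel_core.c for the real thing):

	/* rough sketch of iptunnel_handle_offloads(); not the exact body */
	struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
						 bool csum_help, int gso_type_mask)
	{
		if (!skb->encapsulation) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}
		if (skb_is_gso(skb)) {
			if (skb_unclone(skb, GFP_ATOMIC))
				return ERR_PTR(-ENOMEM);
			skb_shinfo(skb)->gso_type |= gso_type_mask;
		} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
			skb->ip_summed = CHECKSUM_NONE;
		}
		return skb;
	}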
1615 1605
1616#if IS_ENABLED(CONFIG_IPV6) 1606#if IS_ENABLED(CONFIG_IPV6)
@@ -1618,7 +1608,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1618 struct dst_entry *dst, struct sk_buff *skb, 1608 struct dst_entry *dst, struct sk_buff *skb,
1619 struct net_device *dev, struct in6_addr *saddr, 1609 struct net_device *dev, struct in6_addr *saddr,
1620 struct in6_addr *daddr, __u8 prio, __u8 ttl, 1610 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1621 __be16 src_port, __be16 dst_port, __be32 vni) 1611 __be16 src_port, __be16 dst_port, __be32 vni,
1612 bool xnet)
1622{ 1613{
1623 struct ipv6hdr *ip6h; 1614 struct ipv6hdr *ip6h;
1624 struct vxlanhdr *vxh; 1615 struct vxlanhdr *vxh;
@@ -1626,12 +1617,11 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1626 int min_headroom; 1617 int min_headroom;
1627 int err; 1618 int err;
1628 1619
1629 if (!skb->encapsulation) { 1620 skb = vxlan_handle_offloads(skb, !udp_get_no_check6_tx(vs->sock->sk));
1630 skb_reset_inner_headers(skb); 1621 if (IS_ERR(skb))
1631 skb->encapsulation = 1; 1622 return -EINVAL;
1632 }
1633 1623
1634 skb_scrub_packet(skb, false); 1624 skb_scrub_packet(skb, xnet);
1635 1625
1636 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 1626 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1637 + VXLAN_HLEN + sizeof(struct ipv6hdr) 1627 + VXLAN_HLEN + sizeof(struct ipv6hdr)
@@ -1663,27 +1653,14 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1663 uh->source = src_port; 1653 uh->source = src_port;
1664 1654
1665 uh->len = htons(skb->len); 1655 uh->len = htons(skb->len);
1666 uh->check = 0;
1667 1656
1668 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1657 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1669 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 1658 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1670 IPSKB_REROUTED); 1659 IPSKB_REROUTED);
1671 skb_dst_set(skb, dst); 1660 skb_dst_set(skb, dst);
1672 1661
1673 if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) { 1662 udp6_set_csum(udp_get_no_check6_tx(vs->sock->sk), skb,
1674 __wsum csum = skb_checksum(skb, 0, skb->len, 0); 1663 saddr, daddr, skb->len);
1675 skb->ip_summed = CHECKSUM_UNNECESSARY;
1676 uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
1677 IPPROTO_UDP, csum);
1678 if (uh->check == 0)
1679 uh->check = CSUM_MANGLED_0;
1680 } else {
1681 skb->ip_summed = CHECKSUM_PARTIAL;
1682 skb->csum_start = skb_transport_header(skb) - skb->head;
1683 skb->csum_offset = offsetof(struct udphdr, check);
1684 uh->check = ~csum_ipv6_magic(saddr, daddr,
1685 skb->len, IPPROTO_UDP, 0);
1686 }
1687 1664
1688 __skb_push(skb, sizeof(*ip6h)); 1665 __skb_push(skb, sizeof(*ip6h));
1689 skb_reset_network_header(skb); 1666 skb_reset_network_header(skb);
@@ -1699,10 +1676,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1699 ip6h->daddr = *daddr; 1676 ip6h->daddr = *daddr;
1700 ip6h->saddr = *saddr; 1677 ip6h->saddr = *saddr;
1701 1678
1702 err = handle_offloads(skb);
1703 if (err)
1704 return err;
1705
1706 ip6tunnel_xmit(skb, dev); 1679 ip6tunnel_xmit(skb, dev);
1707 return 0; 1680 return 0;
1708} 1681}
@@ -1711,17 +1684,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1711int vxlan_xmit_skb(struct vxlan_sock *vs, 1684int vxlan_xmit_skb(struct vxlan_sock *vs,
1712 struct rtable *rt, struct sk_buff *skb, 1685 struct rtable *rt, struct sk_buff *skb,
1713 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 1686 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1714 __be16 src_port, __be16 dst_port, __be32 vni) 1687 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
1715{ 1688{
1716 struct vxlanhdr *vxh; 1689 struct vxlanhdr *vxh;
1717 struct udphdr *uh; 1690 struct udphdr *uh;
1718 int min_headroom; 1691 int min_headroom;
1719 int err; 1692 int err;
1720 1693
1721 if (!skb->encapsulation) { 1694 skb = vxlan_handle_offloads(skb, !vs->sock->sk->sk_no_check_tx);
1722 skb_reset_inner_headers(skb); 1695 if (IS_ERR(skb))
1723 skb->encapsulation = 1; 1696 return -EINVAL;
1724 }
1725 1697
1726 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 1698 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
1727 + VXLAN_HLEN + sizeof(struct iphdr) 1699 + VXLAN_HLEN + sizeof(struct iphdr)
@@ -1753,14 +1725,12 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
1753 uh->source = src_port; 1725 uh->source = src_port;
1754 1726
1755 uh->len = htons(skb->len); 1727 uh->len = htons(skb->len);
1756 uh->check = 0;
1757 1728
1758 err = handle_offloads(skb); 1729 udp_set_csum(vs->sock->sk->sk_no_check_tx, skb,
1759 if (err) 1730 src, dst, skb->len);
1760 return err;
1761 1731
1762 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP, 1732 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
1763 tos, ttl, df, false); 1733 tos, ttl, df, xnet);
1764} 1734}
1765EXPORT_SYMBOL_GPL(vxlan_xmit_skb); 1735EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1766 1736
@@ -1853,7 +1823,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1853 fl4.daddr = dst->sin.sin_addr.s_addr; 1823 fl4.daddr = dst->sin.sin_addr.s_addr;
1854 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr; 1824 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
1855 1825
1856 rt = ip_route_output_key(dev_net(dev), &fl4); 1826 rt = ip_route_output_key(vxlan->net, &fl4);
1857 if (IS_ERR(rt)) { 1827 if (IS_ERR(rt)) {
1858 netdev_dbg(dev, "no route to %pI4\n", 1828 netdev_dbg(dev, "no route to %pI4\n",
1859 &dst->sin.sin_addr.s_addr); 1829 &dst->sin.sin_addr.s_addr);
@@ -1874,7 +1844,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1874 struct vxlan_dev *dst_vxlan; 1844 struct vxlan_dev *dst_vxlan;
1875 1845
1876 ip_rt_put(rt); 1846 ip_rt_put(rt);
1877 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); 1847 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
1878 if (!dst_vxlan) 1848 if (!dst_vxlan)
1879 goto tx_error; 1849 goto tx_error;
1880 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1850 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1887,7 +1857,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1887 err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb, 1857 err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
1888 fl4.saddr, dst->sin.sin_addr.s_addr, 1858 fl4.saddr, dst->sin.sin_addr.s_addr,
1889 tos, ttl, df, src_port, dst_port, 1859 tos, ttl, df, src_port, dst_port,
1890 htonl(vni << 8)); 1860 htonl(vni << 8),
1861 !net_eq(vxlan->net, dev_net(vxlan->dev)));
1891 1862
1892 if (err < 0) 1863 if (err < 0)
1893 goto rt_tx_error; 1864 goto rt_tx_error;
@@ -1927,7 +1898,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1927 struct vxlan_dev *dst_vxlan; 1898 struct vxlan_dev *dst_vxlan;
1928 1899
1929 dst_release(ndst); 1900 dst_release(ndst);
1930 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); 1901 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
1931 if (!dst_vxlan) 1902 if (!dst_vxlan)
1932 goto tx_error; 1903 goto tx_error;
1933 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1904 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1938,7 +1909,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1938 1909
1939 err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb, 1910 err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
1940 dev, &fl6.saddr, &fl6.daddr, 0, ttl, 1911 dev, &fl6.saddr, &fl6.daddr, 0, ttl,
1941 src_port, dst_port, htonl(vni << 8)); 1912 src_port, dst_port, htonl(vni << 8),
1913 !net_eq(vxlan->net, dev_net(vxlan->dev)));
1942#endif 1914#endif
1943 } 1915 }
1944 1916
@@ -2082,7 +2054,7 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2082static int vxlan_init(struct net_device *dev) 2054static int vxlan_init(struct net_device *dev)
2083{ 2055{
2084 struct vxlan_dev *vxlan = netdev_priv(dev); 2056 struct vxlan_dev *vxlan = netdev_priv(dev);
2085 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 2057 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2086 struct vxlan_sock *vs; 2058 struct vxlan_sock *vs;
2087 2059
2088 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2060 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2090,7 +2062,7 @@ static int vxlan_init(struct net_device *dev)
2090 return -ENOMEM; 2062 return -ENOMEM;
2091 2063
2092 spin_lock(&vn->sock_lock); 2064 spin_lock(&vn->sock_lock);
2093 vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port); 2065 vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
2094 if (vs) { 2066 if (vs) {
2095 /* If we have a socket with same port already, reuse it */ 2067 /* If we have a socket with same port already, reuse it */
2096 atomic_inc(&vs->refcnt); 2068 atomic_inc(&vs->refcnt);
@@ -2172,8 +2144,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
2172/* Cleanup timer and forwarding table on shutdown */ 2144/* Cleanup timer and forwarding table on shutdown */
2173static int vxlan_stop(struct net_device *dev) 2145static int vxlan_stop(struct net_device *dev)
2174{ 2146{
2175 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
2176 struct vxlan_dev *vxlan = netdev_priv(dev); 2147 struct vxlan_dev *vxlan = netdev_priv(dev);
2148 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2177 struct vxlan_sock *vs = vxlan->vn_sock; 2149 struct vxlan_sock *vs = vxlan->vn_sock;
2178 2150
2179 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && 2151 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2202,7 +2174,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2202 struct net_device *lowerdev; 2174 struct net_device *lowerdev;
2203 int max_mtu; 2175 int max_mtu;
2204 2176
2205 lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex); 2177 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
2206 if (lowerdev == NULL) 2178 if (lowerdev == NULL)
2207 return eth_change_mtu(dev, new_mtu); 2179 return eth_change_mtu(dev, new_mtu);
2208 2180
@@ -2285,7 +2257,6 @@ static void vxlan_setup(struct net_device *dev)
2285 2257
2286 dev->tx_queue_len = 0; 2258 dev->tx_queue_len = 0;
2287 dev->features |= NETIF_F_LLTX; 2259 dev->features |= NETIF_F_LLTX;
2288 dev->features |= NETIF_F_NETNS_LOCAL;
2289 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2260 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2290 dev->features |= NETIF_F_RXCSUM; 2261 dev->features |= NETIF_F_RXCSUM;
2291 dev->features |= NETIF_F_GSO_SOFTWARE; 2262 dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -2401,7 +2372,7 @@ static void vxlan_del_work(struct work_struct *work)
2401 * could be used for both IPv4 and IPv6 communications, but 2372 * could be used for both IPv4 and IPv6 communications, but
2402 * users may set bindv6only=1. 2373 * users may set bindv6only=1.
2403 */ 2374 */
2404static struct socket *create_v6_sock(struct net *net, __be16 port) 2375static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
2405{ 2376{
2406 struct sock *sk; 2377 struct sock *sk;
2407 struct socket *sock; 2378 struct socket *sock;
@@ -2438,18 +2409,25 @@ static struct socket *create_v6_sock(struct net *net, __be16 port)
2438 2409
2439 /* Disable multicast loopback */ 2410 /* Disable multicast loopback */
2440 inet_sk(sk)->mc_loop = 0; 2411 inet_sk(sk)->mc_loop = 0;
2412
2413 if (flags & VXLAN_F_UDP_ZERO_CSUM6_TX)
2414 udp_set_no_check6_tx(sk, true);
2415
2416 if (flags & VXLAN_F_UDP_ZERO_CSUM6_RX)
2417 udp_set_no_check6_rx(sk, true);
2418
2441 return sock; 2419 return sock;
2442} 2420}
2443 2421
2444#else 2422#else
2445 2423
2446static struct socket *create_v6_sock(struct net *net, __be16 port) 2424static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
2447{ 2425{
2448 return ERR_PTR(-EPFNOSUPPORT); 2426 return ERR_PTR(-EPFNOSUPPORT);
2449} 2427}
2450#endif 2428#endif
2451 2429
2452static struct socket *create_v4_sock(struct net *net, __be16 port) 2430static struct socket *create_v4_sock(struct net *net, __be16 port, u32 flags)
2453{ 2431{
2454 struct sock *sk; 2432 struct sock *sk;
2455 struct socket *sock; 2433 struct socket *sock;
@@ -2482,18 +2460,24 @@ static struct socket *create_v4_sock(struct net *net, __be16 port)
2482 2460
2483 /* Disable multicast loopback */ 2461 /* Disable multicast loopback */
2484 inet_sk(sk)->mc_loop = 0; 2462 inet_sk(sk)->mc_loop = 0;
2463
2464 if (!(flags & VXLAN_F_UDP_CSUM))
2465 sock->sk->sk_no_check_tx = 1;
2466
2485 return sock; 2467 return sock;
2486} 2468}
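
The split into three flags follows the RFCs: for IPv4 a zero UDP checksum just means "not computed", so transmit checksums stay off unless VXLAN_F_UDP_CSUM asks for them, while IPv6 forbids zero UDP checksums outright (RFC 2460) and RFC 6935/6936 only relax that for tunnels when both directions opt in, hence separate TX and RX knobs on the v6 socket. The bit values below are assumptions for this merge window (the real ones moved to include/net/vxlan.h, per the comment edit above):

	/* assumed flag values; VXLAN_F_LEARN..VXLAN_F_IPV6 occupy 0x01..0x20 */
	#define VXLAN_F_UDP_CSUM		0x40	/* v4: compute tx csum */
	#define VXLAN_F_UDP_ZERO_CSUM6_TX	0x80	/* v6: send zero csum */
	#define VXLAN_F_UDP_ZERO_CSUM6_RX	0x100	/* v6: accept zero csum */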
2487 2469
2488/* Create new listen socket if needed */ 2470/* Create new listen socket if needed */
2489static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, 2471static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2490 vxlan_rcv_t *rcv, void *data, bool ipv6) 2472 vxlan_rcv_t *rcv, void *data,
2473 u32 flags)
2491{ 2474{
2492 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2475 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2493 struct vxlan_sock *vs; 2476 struct vxlan_sock *vs;
2494 struct socket *sock; 2477 struct socket *sock;
2495 struct sock *sk; 2478 struct sock *sk;
2496 unsigned int h; 2479 unsigned int h;
2480 bool ipv6 = !!(flags & VXLAN_F_IPV6);
2497 2481
2498 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 2482 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2499 if (!vs) 2483 if (!vs)
@@ -2505,9 +2489,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2505 INIT_WORK(&vs->del_work, vxlan_del_work); 2489 INIT_WORK(&vs->del_work, vxlan_del_work);
2506 2490
2507 if (ipv6) 2491 if (ipv6)
2508 sock = create_v6_sock(net, port); 2492 sock = create_v6_sock(net, port, flags);
2509 else 2493 else
2510 sock = create_v4_sock(net, port); 2494 sock = create_v4_sock(net, port, flags);
2511 if (IS_ERR(sock)) { 2495 if (IS_ERR(sock)) {
2512 kfree(vs); 2496 kfree(vs);
2513 return ERR_CAST(sock); 2497 return ERR_CAST(sock);
@@ -2545,12 +2529,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2545 2529
2546struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, 2530struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2547 vxlan_rcv_t *rcv, void *data, 2531 vxlan_rcv_t *rcv, void *data,
2548 bool no_share, bool ipv6) 2532 bool no_share, u32 flags)
2549{ 2533{
2550 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2534 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2551 struct vxlan_sock *vs; 2535 struct vxlan_sock *vs;
2552 2536
2553 vs = vxlan_socket_create(net, port, rcv, data, ipv6); 2537 vs = vxlan_socket_create(net, port, rcv, data, flags);
2554 if (!IS_ERR(vs)) 2538 if (!IS_ERR(vs))
2555 return vs; 2539 return vs;
2556 2540
@@ -2578,12 +2562,12 @@ EXPORT_SYMBOL_GPL(vxlan_sock_add);
2578static void vxlan_sock_work(struct work_struct *work) 2562static void vxlan_sock_work(struct work_struct *work)
2579{ 2563{
2580 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work); 2564 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
2581 struct net *net = dev_net(vxlan->dev); 2565 struct net *net = vxlan->net;
2582 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2566 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2583 __be16 port = vxlan->dst_port; 2567 __be16 port = vxlan->dst_port;
2584 struct vxlan_sock *nvs; 2568 struct vxlan_sock *nvs;
2585 2569
2586 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6); 2570 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
2587 spin_lock(&vn->sock_lock); 2571 spin_lock(&vn->sock_lock);
2588 if (!IS_ERR(nvs)) 2572 if (!IS_ERR(nvs))
2589 vxlan_vs_add_dev(nvs, vxlan); 2573 vxlan_vs_add_dev(nvs, vxlan);
@@ -2605,6 +2589,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2605 if (!data[IFLA_VXLAN_ID]) 2589 if (!data[IFLA_VXLAN_ID])
2606 return -EINVAL; 2590 return -EINVAL;
2607 2591
2592 vxlan->net = dev_net(dev);
2593
2608 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 2594 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
2609 dst->remote_vni = vni; 2595 dst->remote_vni = vni;
2610 2596
@@ -2705,12 +2691,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2705 if (data[IFLA_VXLAN_PORT]) 2691 if (data[IFLA_VXLAN_PORT])
2706 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 2692 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
2707 2693
2694 if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
2695 vxlan->flags |= VXLAN_F_UDP_CSUM;
2696
2697 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
2698 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
2699 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
2700
2701 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
2702 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2703 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2704
2708 if (vxlan_find_vni(net, vni, vxlan->dst_port)) { 2705 if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
2709 pr_info("duplicate VNI %u\n", vni); 2706 pr_info("duplicate VNI %u\n", vni);
2710 return -EEXIST; 2707 return -EEXIST;
2711 } 2708 }
2712 2709
2713 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 2710 dev->ethtool_ops = &vxlan_ethtool_ops;
2714 2711
2715 /* create an fdb entry for a valid default destination */ 2712 /* create an fdb entry for a valid default destination */
2716 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { 2713 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
@@ -2739,8 +2736,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2739 2736
2740static void vxlan_dellink(struct net_device *dev, struct list_head *head) 2737static void vxlan_dellink(struct net_device *dev, struct list_head *head)
2741{ 2738{
2742 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
2743 struct vxlan_dev *vxlan = netdev_priv(dev); 2739 struct vxlan_dev *vxlan = netdev_priv(dev);
2740 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2744 2741
2745 spin_lock(&vn->sock_lock); 2742 spin_lock(&vn->sock_lock);
2746 if (!hlist_unhashed(&vxlan->hlist)) 2743 if (!hlist_unhashed(&vxlan->hlist))
@@ -2768,7 +2765,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
2768 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 2765 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
2769 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 2766 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
2770 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 2767 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
2771 nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */ 2768 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
2769 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
2770 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
2771 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
2772 0; 2772 0;
2773} 2773}
2774 2774
@@ -2828,7 +2828,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
2828 !!(vxlan->flags & VXLAN_F_L3MISS)) || 2828 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
2829 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || 2829 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
2830 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) || 2830 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
2831 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port)) 2831 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
2832 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
2833 !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
2834 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
2835 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
2836 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
2837 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
2832 goto nla_put_failure; 2838 goto nla_put_failure;
2833 2839
2834 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 2840 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
@@ -2905,8 +2911,33 @@ static __net_init int vxlan_init_net(struct net *net)
2905 return 0; 2911 return 0;
2906} 2912}
2907 2913
2914static void __net_exit vxlan_exit_net(struct net *net)
2915{
2916 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2917 struct vxlan_dev *vxlan, *next;
2918 struct net_device *dev, *aux;
2919 LIST_HEAD(list);
2920
2921 rtnl_lock();
2922 for_each_netdev_safe(net, dev, aux)
2923 if (dev->rtnl_link_ops == &vxlan_link_ops)
2924 unregister_netdevice_queue(dev, &list);
2925
2926 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
2927 /* If vxlan->dev is in the same netns, it has already been added
2928 * to the list by the previous loop.
2929 */
2930 if (!net_eq(dev_net(vxlan->dev), net))
2931 unregister_netdevice_queue(vxlan->dev, &list);
2932 }
2933
2934 unregister_netdevice_many(&list);
2935 rtnl_unlock();
2936}
2937
2908static struct pernet_operations vxlan_net_ops = { 2938static struct pernet_operations vxlan_net_ops = {
2909 .init = vxlan_init_net, 2939 .init = vxlan_init_net,
2940 .exit = vxlan_exit_net,
2910 .id = &vxlan_net_id, 2941 .id = &vxlan_net_id,
2911 .size = sizeof(struct vxlan_net), 2942 .size = sizeof(struct vxlan_net),
2912}; 2943};
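
The new vxlan_exit_net() collects every device in the dying namespace onto one list and tears the batch down with a single unregister_netdevice_many() call, so the synchronization cost of unregistration is paid once per batch rather than once per device; the second loop only catches vxlan devices whose lower device lives in another namespace. For completeness, a sketch of how such pernet ops are typically wired up at module init (the init path is outside this diff, so the exact body is an assumption):

    static int __init vxlan_init_module(void)
    {
            int rc;

            /* .init runs for every existing and future netns; .exit runs,
             * as in the hunk above, when a netns is dismantled */
            rc = register_pernet_device(&vxlan_net_ops);
            if (rc)
                    return rc;

            rc = rtnl_link_register(&vxlan_link_ops);
            if (rc)
                    unregister_pernet_device(&vxlan_net_ops);
            return rc;
    }
    module_init(vxlan_init_module);
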
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index bcfff0d62de4..93ace042d0aa 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -26,6 +26,7 @@
26#include <linux/ioport.h> 26#include <linux/ioport.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/delay.h>
29#include <linux/if.h> 30#include <linux/if.h>
30#include <linux/hdlc.h> 31#include <linux/hdlc.h>
31#include <asm/io.h> 32#include <asm/io.h>
@@ -678,7 +679,6 @@ static inline void
678fst_cpureset(struct fst_card_info *card) 679fst_cpureset(struct fst_card_info *card)
679{ 680{
680 unsigned char interrupt_line_register; 681 unsigned char interrupt_line_register;
681 unsigned long j = jiffies + 1;
682 unsigned int regval; 682 unsigned int regval;
683 683
684 if (card->family == FST_FAMILY_TXU) { 684 if (card->family == FST_FAMILY_TXU) {
@@ -696,16 +696,12 @@ fst_cpureset(struct fst_card_info *card)
696 /* 696 /*
697 * We are delaying here to allow the 9054 to reset itself 697 * We are delaying here to allow the 9054 to reset itself
698 */ 698 */
699 j = jiffies + 1; 699 usleep_range(10, 20);
700 while (jiffies < j)
701 /* Do nothing */ ;
702 outw(0x240f, card->pci_conf + CNTRL_9054 + 2); 700 outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
703 /* 701 /*
704 * We are delaying here to allow the 9054 to reload its eeprom 702 * We are delaying here to allow the 9054 to reload its eeprom
705 */ 703 */
706 j = jiffies + 1; 704 usleep_range(10, 20);
707 while (jiffies < j)
708 /* Do nothing */ ;
709 outw(0x040f, card->pci_conf + CNTRL_9054 + 2); 705 outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
710 706
711 if (pci_write_config_byte 707 if (pci_write_config_byte
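
The deleted delay loop compared jiffies with a raw '<', which both busy-spins and misbehaves when the counter wraps; since only a short, non-atomic delay is needed here, usleep_range(10, 20) sleeps for the intended time instead of spinning for up to a full tick. Where spinning really is required (atomic context), the wrap-safe form uses time_before(), sketched here as an assumption about what such a caller would do:

    #include <linux/delay.h>
    #include <linux/jiffies.h>

    static void spin_one_tick(void)
    {
            unsigned long deadline = jiffies + 1;

            /* time_before() is wrap-safe; "jiffies < deadline" is not */
            while (time_before(jiffies, deadline))
                    cpu_relax();
    }
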
@@ -886,20 +882,18 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
886 * Receive a frame through the DMA 882 * Receive a frame through the DMA
887 */ 883 */
888static inline void 884static inline void
889fst_rx_dma(struct fst_card_info *card, dma_addr_t skb, 885fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
890 dma_addr_t mem, int len)
891{ 886{
892 /* 887 /*
893 * This routine will setup the DMA and start it 888 * This routine will setup the DMA and start it
894 */ 889 */
895 890
896 dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n", 891 dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
897 (unsigned long) skb, (unsigned long) mem, len);
898 if (card->dmarx_in_progress) { 892 if (card->dmarx_in_progress) {
899 dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n"); 893 dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
900 } 894 }
901 895
902 outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */ 896 outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */
903 outl(mem, card->pci_conf + DMALADR0); /* from here */ 897 outl(mem, card->pci_conf + DMALADR0); /* from here */
904 outl(len, card->pci_conf + DMASIZ0); /* for this length */ 898 outl(len, card->pci_conf + DMASIZ0); /* for this length */
905 outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */ 899 outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
@@ -915,20 +909,19 @@ fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
915 * Send a frame through the DMA 909 * Send a frame through the DMA
916 */ 910 */
917static inline void 911static inline void
918fst_tx_dma(struct fst_card_info *card, unsigned char *skb, 912fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
919 unsigned char *mem, int len)
920{ 913{
921 /* 914 /*
922 * This routine will setup the DMA and start it. 915 * This routine will setup the DMA and start it.
923 */ 916 */
924 917
925 dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len); 918 dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
926 if (card->dmatx_in_progress) { 919 if (card->dmatx_in_progress) {
927 dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n"); 920 dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
928 } 921 }
929 922
930 outl((unsigned long) skb, card->pci_conf + DMAPADR1); /* Copy from here */ 923 outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */
931 outl((unsigned long) mem, card->pci_conf + DMALADR1); /* to here */ 924 outl(mem, card->pci_conf + DMALADR1); /* to here */
932 outl(len, card->pci_conf + DMASIZ1); /* for this length */ 925 outl(len, card->pci_conf + DMASIZ1); /* for this length */
933 outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */ 926 outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */
934 927
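
fst_rx_dma() and fst_tx_dma() previously took kernel virtual pointers (and fst_tx_dma cast them to unsigned long for outl()); the new signatures take the dma_addr_t bus handle plus a u32 card-memory offset, which is what the PLX DMAPADR/DMALADR registers actually consume. A hedged sketch of where such a handle comes from (the device pointer and direction are placeholders):

    #include <linux/dma-mapping.h>

    static dma_addr_t map_tx_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* the device must be given a bus address, never a kernel pointer */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return 0;  /* 0 denotes failure in this sketch only */
            return handle;
    }
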
@@ -1405,9 +1398,7 @@ do_bottom_half_tx(struct fst_card_info *card)
1405 card->dma_len_tx = skb->len; 1398 card->dma_len_tx = skb->len;
1406 card->dma_txpos = port->txpos; 1399 card->dma_txpos = port->txpos;
1407 fst_tx_dma(card, 1400 fst_tx_dma(card,
1408 (char *) card-> 1401 card->tx_dma_handle_card,
1409 tx_dma_handle_card,
1410 (char *)
1411 BUF_OFFSET(txBuffer[pi] 1402 BUF_OFFSET(txBuffer[pi]
1412 [port->txpos][0]), 1403 [port->txpos][0]),
1413 skb->len); 1404 skb->len);
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index de3bbf43fc5a..cdd45fb8a1f6 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1322,10 +1322,6 @@ NOTE: This is rather a useless action right now, as the
1322 1322
1323static int sdla_change_mtu(struct net_device *dev, int new_mtu) 1323static int sdla_change_mtu(struct net_device *dev, int new_mtu)
1324{ 1324{
1325 struct frad_local *flp;
1326
1327 flp = netdev_priv(dev);
1328
1329 if (netif_running(dev)) 1325 if (netif_running(dev))
1330 return -EBUSY; 1326 return -EBUSY;
1331 1327
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4a01e5c7fe09..4c417903e9be 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -1061,7 +1061,7 @@ int i2400m_firmware_check(struct i2400m *i2400m)
1061 goto error_bad_major; 1061 goto error_bad_major;
1062 } 1062 }
1063 result = 0; 1063 result = 0;
1064 if (minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR) 1064 if (minor > I2400M_HDIv_MINOR_2 || minor < I2400M_HDIv_MINOR)
1065 dev_warn(dev, "untested minor fw version %u.%u.%u\n", 1065 dev_warn(dev, "untested minor fw version %u.%u.%u\n",
1066 major, minor, branch); 1066 major, minor, branch);
1067 /* Yes, we ignore the branch -- we don't have to track it */ 1067 /* Yes, we ignore the branch -- we don't have to track it */
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9c34d2fccfac..9c78090e72f8 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -500,26 +500,23 @@ int i2400m_pm_notifier(struct notifier_block *notifier,
500 */ 500 */
501int i2400m_pre_reset(struct i2400m *i2400m) 501int i2400m_pre_reset(struct i2400m *i2400m)
502{ 502{
503 int result;
504 struct device *dev = i2400m_dev(i2400m); 503 struct device *dev = i2400m_dev(i2400m);
505 504
506 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 505 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
507 d_printf(1, dev, "pre-reset shut down\n"); 506 d_printf(1, dev, "pre-reset shut down\n");
508 507
509 result = 0;
510 mutex_lock(&i2400m->init_mutex); 508 mutex_lock(&i2400m->init_mutex);
511 if (i2400m->updown) { 509 if (i2400m->updown) {
512 netif_tx_disable(i2400m->wimax_dev.net_dev); 510 netif_tx_disable(i2400m->wimax_dev.net_dev);
513 __i2400m_dev_stop(i2400m); 511 __i2400m_dev_stop(i2400m);
514 result = 0;
515 /* don't set updown to zero -- this way 512 /* don't set updown to zero -- this way
516 * post_reset can restore properly */ 513 * post_reset can restore properly */
517 } 514 }
518 mutex_unlock(&i2400m->init_mutex); 515 mutex_unlock(&i2400m->init_mutex);
519 if (i2400m->bus_release) 516 if (i2400m->bus_release)
520 i2400m->bus_release(i2400m); 517 i2400m->bus_release(i2400m);
521 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 518 d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
522 return result; 519 return 0;
523} 520}
524EXPORT_SYMBOL_GPL(i2400m_pre_reset); 521EXPORT_SYMBOL_GPL(i2400m_pre_reset);
525 522
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 99b3bfa717d5..d48776e4f343 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -365,15 +365,15 @@ static inline unsigned long at76_get_timeout(struct dfu_status *s)
365static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size, 365static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
366 int manifest_sync_timeout) 366 int manifest_sync_timeout)
367{ 367{
368 u8 *block;
369 struct dfu_status dfu_stat_buf;
370 int ret = 0; 368 int ret = 0;
371 int need_dfu_state = 1; 369 int need_dfu_state = 1;
372 int is_done = 0; 370 int is_done = 0;
373 u8 dfu_state = 0;
374 u32 dfu_timeout = 0; 371 u32 dfu_timeout = 0;
375 int bsize = 0; 372 int bsize = 0;
376 int blockno = 0; 373 int blockno = 0;
374 struct dfu_status *dfu_stat_buf = NULL;
375 u8 *dfu_state = NULL;
376 u8 *block = NULL;
377 377
378 at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size, 378 at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size,
379 manifest_sync_timeout); 379 manifest_sync_timeout);
@@ -383,13 +383,28 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
383 return -EINVAL; 383 return -EINVAL;
384 } 384 }
385 385
386 dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
387 if (!dfu_stat_buf) {
388 ret = -ENOMEM;
389 goto exit;
390 }
391
386 block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL); 392 block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
387 if (!block) 393 if (!block) {
388 return -ENOMEM; 394 ret = -ENOMEM;
395 goto exit;
396 }
397
398 dfu_state = kmalloc(sizeof(u8), GFP_KERNEL);
399 if (!dfu_state) {
400 ret = -ENOMEM;
401 goto exit;
402 }
403 *dfu_state = 0;
389 404
390 do { 405 do {
391 if (need_dfu_state) { 406 if (need_dfu_state) {
392 ret = at76_dfu_get_state(udev, &dfu_state); 407 ret = at76_dfu_get_state(udev, dfu_state);
393 if (ret < 0) { 408 if (ret < 0) {
394 dev_err(&udev->dev, 409 dev_err(&udev->dev,
395 "cannot get DFU state: %d\n", ret); 410 "cannot get DFU state: %d\n", ret);
@@ -398,13 +413,13 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
398 need_dfu_state = 0; 413 need_dfu_state = 0;
399 } 414 }
400 415
401 switch (dfu_state) { 416 switch (*dfu_state) {
402 case STATE_DFU_DOWNLOAD_SYNC: 417 case STATE_DFU_DOWNLOAD_SYNC:
403 at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC"); 418 at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC");
404 ret = at76_dfu_get_status(udev, &dfu_stat_buf); 419 ret = at76_dfu_get_status(udev, dfu_stat_buf);
405 if (ret >= 0) { 420 if (ret >= 0) {
406 dfu_state = dfu_stat_buf.state; 421 *dfu_state = dfu_stat_buf->state;
407 dfu_timeout = at76_get_timeout(&dfu_stat_buf); 422 dfu_timeout = at76_get_timeout(dfu_stat_buf);
408 need_dfu_state = 0; 423 need_dfu_state = 0;
409 } else 424 } else
410 dev_err(&udev->dev, 425 dev_err(&udev->dev,
@@ -447,12 +462,12 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
447 case STATE_DFU_MANIFEST_SYNC: 462 case STATE_DFU_MANIFEST_SYNC:
448 at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC"); 463 at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC");
449 464
450 ret = at76_dfu_get_status(udev, &dfu_stat_buf); 465 ret = at76_dfu_get_status(udev, dfu_stat_buf);
451 if (ret < 0) 466 if (ret < 0)
452 break; 467 break;
453 468
454 dfu_state = dfu_stat_buf.state; 469 *dfu_state = dfu_stat_buf->state;
455 dfu_timeout = at76_get_timeout(&dfu_stat_buf); 470 dfu_timeout = at76_get_timeout(dfu_stat_buf);
456 need_dfu_state = 0; 471 need_dfu_state = 0;
457 472
458 /* override the timeout from the status response, 473 /* override the timeout from the status response,
@@ -484,14 +499,17 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
484 break; 499 break;
485 500
486 default: 501 default:
487 at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", dfu_state); 502 at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", *dfu_state);
488 ret = -EINVAL; 503 ret = -EINVAL;
489 break; 504 break;
490 } 505 }
491 } while (!is_done && (ret >= 0)); 506 } while (!is_done && (ret >= 0));
492 507
493exit: 508exit:
509 kfree(dfu_state);
494 kfree(block); 510 kfree(block);
511 kfree(dfu_stat_buf);
512
495 if (ret >= 0) 513 if (ret >= 0)
496 ret = 0; 514 ret = 0;
497 515
@@ -1277,6 +1295,7 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
1277 dev_err(&udev->dev, 1295 dev_err(&udev->dev,
1278 "loading %dth firmware block failed: %d\n", 1296 "loading %dth firmware block failed: %d\n",
1279 blockno, ret); 1297 blockno, ret);
1298 ret = -EIO;
1280 goto exit; 1299 goto exit;
1281 } 1300 }
1282 buf += bsize; 1301 buf += bsize;
@@ -1410,6 +1429,8 @@ static int at76_startup_device(struct at76_priv *priv)
1410 /* remove BSSID from previous run */ 1429 /* remove BSSID from previous run */
1411 memset(priv->bssid, 0, ETH_ALEN); 1430 memset(priv->bssid, 0, ETH_ALEN);
1412 1431
1432 priv->scanning = false;
1433
1413 if (at76_set_radio(priv, 1) == 1) 1434 if (at76_set_radio(priv, 1) == 1)
1414 at76_wait_completion(priv, CMD_RADIO_ON); 1435 at76_wait_completion(priv, CMD_RADIO_ON);
1415 1436
@@ -1483,6 +1504,52 @@ static void at76_work_submit_rx(struct work_struct *work)
1483 mutex_unlock(&priv->mtx); 1504 mutex_unlock(&priv->mtx);
1484} 1505}
1485 1506
1507/* This is a workaround to make scanning work:
1508 * currently mac80211 does not process frames with no frequency
1509 * information.
1510 * However, during a scan the HW performs a sweep by itself, and we
1511 * are unable to know where the radio is actually tuned.
1512 * This function does its best to guess that information.
1513 * During a scan, if the current frame is a beacon or a probe response,
1514 * the channel information is extracted from it.
1515 * When not scanning, for other frames, or if for whatever reason we
1516 * fail to parse beacons and probe responses, this function returns
1517 * priv->channel, which should be correct at least when we are not
1518 * scanning.
1519 */
1520static inline int at76_guess_freq(struct at76_priv *priv)
1521{
1522 size_t el_off;
1523 const u8 *el;
1524 int channel = priv->channel;
1525 int len = priv->rx_skb->len;
1526 struct ieee80211_hdr *hdr = (void *)priv->rx_skb->data;
1527
1528 if (!priv->scanning)
1529 goto exit;
1530
1531 if (len < 24)
1532 goto exit;
1533
1534 if (ieee80211_is_probe_resp(hdr->frame_control)) {
1535 el_off = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
1536 el = ((struct ieee80211_mgmt *)hdr)->u.probe_resp.variable;
1537 } else if (ieee80211_is_beacon(hdr->frame_control)) {
1538 el_off = offsetof(struct ieee80211_mgmt, u.beacon.variable);
1539 el = ((struct ieee80211_mgmt *)hdr)->u.beacon.variable;
1540 } else {
1541 goto exit;
1542 }
1543 len -= el_off;
1544
1545 el = cfg80211_find_ie(WLAN_EID_DS_PARAMS, el, len);
1546 if (el && el[1] > 0)
1547 channel = el[2];
1548
1549exit:
1550 return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
1551}
1552
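
at76_guess_freq() finishes by converting the guessed channel to a frequency; for the 2.4 GHz band, ieee80211_channel_to_frequency() computes 2407 + 5 * channel MHz for channels 1-13 and 2484 MHz for channel 14. The mapping restated on its own:

    /* 2.4 GHz channel -> centre frequency in MHz, as
     * ieee80211_channel_to_frequency() computes it for IEEE80211_BAND_2GHZ */
    static int chan_to_freq_2ghz(int chan)
    {
            if (chan == 14)
                    return 2484;
            if (chan >= 1 && chan <= 13)
                    return 2407 + chan * 5;
            return 0;  /* invalid channel */
    }
    /* chan_to_freq_2ghz(1) == 2412, chan_to_freq_2ghz(6) == 2437 */
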
1486static void at76_rx_tasklet(unsigned long param) 1553static void at76_rx_tasklet(unsigned long param)
1487{ 1554{
1488 struct urb *urb = (struct urb *)param; 1555 struct urb *urb = (struct urb *)param;
@@ -1523,6 +1590,8 @@ static void at76_rx_tasklet(unsigned long param)
1523 rx_status.signal = buf->rssi; 1590 rx_status.signal = buf->rssi;
1524 rx_status.flag |= RX_FLAG_DECRYPTED; 1591 rx_status.flag |= RX_FLAG_DECRYPTED;
1525 rx_status.flag |= RX_FLAG_IV_STRIPPED; 1592 rx_status.flag |= RX_FLAG_IV_STRIPPED;
1593 rx_status.band = IEEE80211_BAND_2GHZ;
1594 rx_status.freq = at76_guess_freq(priv);
1526 1595
1527 at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d", 1596 at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
1528 priv->rx_skb->len, priv->rx_skb->data_len); 1597 priv->rx_skb->len, priv->rx_skb->data_len);
@@ -1875,6 +1944,8 @@ static void at76_dwork_hw_scan(struct work_struct *work)
1875 if (is_valid_ether_addr(priv->bssid)) 1944 if (is_valid_ether_addr(priv->bssid))
1876 at76_join(priv); 1945 at76_join(priv);
1877 1946
1947 priv->scanning = false;
1948
1878 mutex_unlock(&priv->mtx); 1949 mutex_unlock(&priv->mtx);
1879 1950
1880 ieee80211_scan_completed(priv->hw, false); 1951 ieee80211_scan_completed(priv->hw, false);
@@ -1929,6 +2000,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw,
1929 goto exit; 2000 goto exit;
1930 } 2001 }
1931 2002
2003 priv->scanning = true;
1932 ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan, 2004 ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan,
1933 SCAN_POLL_INTERVAL); 2005 SCAN_POLL_INTERVAL);
1934 2006
@@ -2020,6 +2092,44 @@ static void at76_configure_filter(struct ieee80211_hw *hw,
2020 ieee80211_queue_work(hw, &priv->work_set_promisc); 2092 ieee80211_queue_work(hw, &priv->work_set_promisc);
2021} 2093}
2022 2094
2095static int at76_set_wep(struct at76_priv *priv)
2096{
2097 int ret = 0;
2098 struct mib_mac_wep *mib_data = &priv->mib_buf.data.wep_mib;
2099
2100 priv->mib_buf.type = MIB_MAC_WEP;
2101 priv->mib_buf.size = sizeof(struct mib_mac_wep);
2102 priv->mib_buf.index = 0;
2103
2104 memset(mib_data, 0, sizeof(*mib_data));
2105
2106 if (priv->wep_enabled) {
2107 if (priv->wep_keys_len[priv->wep_key_id] > WEP_SMALL_KEY_LEN)
2108 mib_data->encryption_level = 2;
2109 else
2110 mib_data->encryption_level = 1;
2111
2112 /* always exclude unencrypted if WEP is active */
2113 mib_data->exclude_unencrypted = 1;
2114 } else {
2115 mib_data->exclude_unencrypted = 0;
2116 mib_data->encryption_level = 0;
2117 }
2118
2119 mib_data->privacy_invoked = priv->wep_enabled;
2120 mib_data->wep_default_key_id = priv->wep_key_id;
2121 memcpy(mib_data->wep_default_keyvalue, priv->wep_keys,
2122 sizeof(priv->wep_keys));
2123
2124 ret = at76_set_mib(priv, &priv->mib_buf);
2125
2126 if (ret < 0)
2127 wiphy_err(priv->hw->wiphy,
2128 "set_mib (wep) failed: %d\n", ret);
2129
2130 return ret;
2131}
2132
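
The new at76_set_wep() reprograms only the WEP MIB instead of restarting the whole device from at76_set_key() (last hunk of this file): encryption_level 1 selects 40-bit keys, level 2 selects 104-bit keys, and level 0 with privacy off disables WEP. Reduced to the decision alone (the 5-byte threshold is assumed to be this driver's WEP_SMALL_KEY_LEN):

    static int wep_encryption_level(int wep_enabled, int key_len_bytes)
    {
            if (!wep_enabled)
                    return 0;                  /* privacy disabled */
            return key_len_bytes > 5 ? 2 : 1;  /* 104-bit : 40-bit key */
    }
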
2023static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2133static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2024 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 2134 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2025 struct ieee80211_key_conf *key) 2135 struct ieee80211_key_conf *key)
@@ -2062,7 +2172,7 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2062 priv->wep_enabled = 1; 2172 priv->wep_enabled = 1;
2063 } 2173 }
2064 2174
2065 at76_startup_device(priv); 2175 at76_set_wep(priv);
2066 2176
2067 mutex_unlock(&priv->mtx); 2177 mutex_unlock(&priv->mtx);
2068 2178
@@ -2330,16 +2440,22 @@ static int at76_probe(struct usb_interface *interface,
2330 struct usb_device *udev; 2440 struct usb_device *udev;
2331 int op_mode; 2441 int op_mode;
2332 int need_ext_fw = 0; 2442 int need_ext_fw = 0;
2333 struct mib_fw_version fwv; 2443 struct mib_fw_version *fwv = NULL;
2334 int board_type = (int)id->driver_info; 2444 int board_type = (int)id->driver_info;
2335 2445
2336 udev = usb_get_dev(interface_to_usbdev(interface)); 2446 udev = usb_get_dev(interface_to_usbdev(interface));
2337 2447
2448 fwv = kmalloc(sizeof(*fwv), GFP_KERNEL);
2449 if (!fwv) {
2450 ret = -ENOMEM;
2451 goto exit;
2452 }
2453
2338 /* Load firmware into kernel memory */ 2454 /* Load firmware into kernel memory */
2339 fwe = at76_load_firmware(udev, board_type); 2455 fwe = at76_load_firmware(udev, board_type);
2340 if (!fwe) { 2456 if (!fwe) {
2341 ret = -ENOENT; 2457 ret = -ENOENT;
2342 goto error; 2458 goto exit;
2343 } 2459 }
2344 2460
2345 op_mode = at76_get_op_mode(udev); 2461 op_mode = at76_get_op_mode(udev);
@@ -2353,7 +2469,7 @@ static int at76_probe(struct usb_interface *interface,
2353 dev_err(&interface->dev, 2469 dev_err(&interface->dev,
2354 "cannot handle a device in HW_CONFIG_MODE\n"); 2470 "cannot handle a device in HW_CONFIG_MODE\n");
2355 ret = -EBUSY; 2471 ret = -EBUSY;
2356 goto error; 2472 goto exit;
2357 } 2473 }
2358 2474
2359 if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH 2475 if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH
@@ -2366,10 +2482,10 @@ static int at76_probe(struct usb_interface *interface,
2366 dev_err(&interface->dev, 2482 dev_err(&interface->dev,
2367 "error %d downloading internal firmware\n", 2483 "error %d downloading internal firmware\n",
2368 ret); 2484 ret);
2369 goto error; 2485 goto exit;
2370 } 2486 }
2371 usb_put_dev(udev); 2487 usb_put_dev(udev);
2372 return ret; 2488 goto exit;
2373 } 2489 }
2374 2490
2375 /* Internal firmware already inside the device. Get firmware 2491 /* Internal firmware already inside the device. Get firmware
@@ -2382,8 +2498,8 @@ static int at76_probe(struct usb_interface *interface,
2382 * query the device for the fw version */ 2498 * query the device for the fw version */
2383 if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100) 2499 if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100)
2384 || (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) { 2500 || (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) {
2385 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); 2501 ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
2386 if (ret < 0 || (fwv.major | fwv.minor) == 0) 2502 if (ret < 0 || (fwv->major | fwv->minor) == 0)
2387 need_ext_fw = 1; 2503 need_ext_fw = 1;
2388 } else 2504 } else
2389 /* No way to check firmware version, reload to be sure */ 2505 /* No way to check firmware version, reload to be sure */
@@ -2394,37 +2510,37 @@ static int at76_probe(struct usb_interface *interface,
2394 "downloading external firmware\n"); 2510 "downloading external firmware\n");
2395 2511
2396 ret = at76_load_external_fw(udev, fwe); 2512 ret = at76_load_external_fw(udev, fwe);
2397 if (ret) 2513 if (ret < 0)
2398 goto error; 2514 goto exit;
2399 2515
2400 /* Re-check firmware version */ 2516 /* Re-check firmware version */
2401 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); 2517 ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
2402 if (ret < 0) { 2518 if (ret < 0) {
2403 dev_err(&interface->dev, 2519 dev_err(&interface->dev,
2404 "error %d getting firmware version\n", ret); 2520 "error %d getting firmware version\n", ret);
2405 goto error; 2521 goto exit;
2406 } 2522 }
2407 } 2523 }
2408 2524
2409 priv = at76_alloc_new_device(udev); 2525 priv = at76_alloc_new_device(udev);
2410 if (!priv) { 2526 if (!priv) {
2411 ret = -ENOMEM; 2527 ret = -ENOMEM;
2412 goto error; 2528 goto exit;
2413 } 2529 }
2414 2530
2415 usb_set_intfdata(interface, priv); 2531 usb_set_intfdata(interface, priv);
2416 2532
2417 memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version)); 2533 memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
2418 priv->board_type = board_type; 2534 priv->board_type = board_type;
2419 2535
2420 ret = at76_init_new_device(priv, interface); 2536 ret = at76_init_new_device(priv, interface);
2421 if (ret < 0) 2537 if (ret < 0)
2422 at76_delete_device(priv); 2538 at76_delete_device(priv);
2423 2539
2424 return ret; 2540exit:
2425 2541 kfree(fwv);
2426error: 2542 if (ret < 0)
2427 usb_put_dev(udev); 2543 usb_put_dev(udev);
2428 return ret; 2544 return ret;
2429} 2545}
2430 2546
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index f14a65473fe8..55090a38ac95 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -219,18 +219,6 @@ struct at76_req_join {
219 u8 reserved; 219 u8 reserved;
220} __packed; 220} __packed;
221 221
222struct set_mib_buffer {
223 u8 type;
224 u8 size;
225 u8 index;
226 u8 reserved;
227 union {
228 u8 byte;
229 __le16 word;
230 u8 addr[ETH_ALEN];
231 } data;
232} __packed;
233
234struct mib_local { 222struct mib_local {
235 u16 reserved0; 223 u16 reserved0;
236 u8 beacon_enable; 224 u8 beacon_enable;
@@ -334,6 +322,19 @@ struct mib_mdomain {
334 u8 channel_list[14]; /* 0 for invalid channels */ 322 u8 channel_list[14]; /* 0 for invalid channels */
335} __packed; 323} __packed;
336 324
325struct set_mib_buffer {
326 u8 type;
327 u8 size;
328 u8 index;
329 u8 reserved;
330 union {
331 u8 byte;
332 __le16 word;
333 u8 addr[ETH_ALEN];
334 struct mib_mac_wep wep_mib;
335 } data;
336} __packed;
337
337struct at76_fw_header { 338struct at76_fw_header {
338 __le32 crc; /* CRC32 of the whole image */ 339 __le32 crc; /* CRC32 of the whole image */
339 __le32 board_type; /* firmware compatibility code */ 340 __le32 board_type; /* firmware compatibility code */
@@ -417,6 +418,7 @@ struct at76_priv {
417 int scan_max_time; /* scan max channel time */ 418 int scan_max_time; /* scan max channel time */
418 int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */ 419 int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
419 int scan_need_any; /* if set, need to scan for any ESSID */ 420 int scan_need_any; /* if set, need to scan for any ESSID */
421 bool scanning; /* if set, the scan is running */
420 422
421 u16 assoc_id; /* current association ID, if associated */ 423 u16 assoc_id; /* current association ID, if associated */
422 424
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 507d9a9ee69a..f92050617ae6 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1090 return ret; 1090 return ret;
1091} 1091}
1092 1092
1093static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1093static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1094 u32 queues, bool drop)
1094{ 1095{
1095 struct ar5523 *ar = hw->priv; 1096 struct ar5523 *ar = hw->priv;
1096 1097
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index a1f099628850..17d221abd58c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
175 return 0; 175 return 0;
176} 176}
177 177
178int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param) 178int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
179{ 179{
180 struct bmi_cmd cmd; 180 struct bmi_cmd cmd;
181 union bmi_resp resp; 181 union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
184 int ret; 184 int ret;
185 185
186 ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", 186 ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
187 address, *param); 187 address, param);
188 188
189 if (ar->bmi.done_sent) { 189 if (ar->bmi.done_sent) {
190 ath10k_warn("command disallowed\n"); 190 ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
193 193
194 cmd.id = __cpu_to_le32(BMI_EXECUTE); 194 cmd.id = __cpu_to_le32(BMI_EXECUTE);
195 cmd.execute.addr = __cpu_to_le32(address); 195 cmd.execute.addr = __cpu_to_le32(address);
196 cmd.execute.param = __cpu_to_le32(*param); 196 cmd.execute.param = __cpu_to_le32(param);
197 197
198 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); 198 ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
199 if (ret) { 199 if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
204 if (resplen < sizeof(resp.execute)) { 204 if (resplen < sizeof(resp.execute)) {
205 ath10k_warn("invalid execute response length (%d)\n", 205 ath10k_warn("invalid execute response length (%d)\n",
206 resplen); 206 resplen);
207 return ret; 207 return -EIO;
208 } 208 }
209 209
210 *param = __le32_to_cpu(resp.execute.result); 210 *result = __le32_to_cpu(resp.execute.result);
211
212 ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
213
211 return 0; 214 return 0;
212} 215}
213 216
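
ath10k_bmi_execute() previously used one u32 pointer as both input parameter and output result; splitting them makes call sites explicit, and the response length check now returns a real error (-EIO) instead of the stale success code. A condensed restatement of how the otp hunk in core.c (further down) calls it after this change:

    static int run_target_code(struct ath10k *ar, u32 address)
    {
            u32 result;
            int ret;

            ret = ath10k_bmi_execute(ar, address, 0 /* param */, &result);
            if (ret)
                    return ret;        /* transport/protocol failure */
            if (result != 0)
                    return -EINVAL;    /* target-reported failure code */
            return 0;
    }
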
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 8d81ce1cec21..111ab701465c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -201,7 +201,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
201 \ 201 \
202 addr = host_interest_item_address(HI_ITEM(item)); \ 202 addr = host_interest_item_address(HI_ITEM(item)); \
203 ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \ 203 ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
204 *val = __le32_to_cpu(tmp); \ 204 if (!ret) \
205 *val = __le32_to_cpu(tmp); \
205 ret; \ 206 ret; \
206 }) 207 })
207 208
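
The macro fix guards the store so *val is written only when ath10k_bmi_read_memory() succeeded, instead of publishing an uninitialized tmp on failure; the surrounding ({ ... ret; }) is a GCC statement expression whose final statement supplies the macro's value. The construct in miniature (do_read() is a stand-in):

    #include <stdio.h>

    static int do_read(int *out) { *out = 42; return 0; }

    /* GCC statement expression: the last statement is the value of
     * the whole expression */
    #define read_checked(dst)                       \
            ({                                      \
                    int __ret = do_read(&(dst));    \
                    if (__ret)                      \
                            (dst) = 0;              \
                    __ret;                          \
            })

    int main(void)
    {
            int v;

            if (!read_checked(v))
                    printf("v = %d\n", v);
            return 0;
    }
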
@@ -217,7 +218,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
217 ret; \ 218 ret; \
218 }) 219 })
219 220
220int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param); 221int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
221int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address); 222int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
222int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length); 223int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
223int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, 224int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a79499c82350..d185dc0cd12b 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -329,6 +329,33 @@ exit:
329 return ret; 329 return ret;
330} 330}
331 331
332void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
333{
334 struct ath10k *ar = pipe->ar;
335 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
336 struct ath10k_ce_ring *src_ring = pipe->src_ring;
337 u32 ctrl_addr = pipe->ctrl_addr;
338
339 lockdep_assert_held(&ar_pci->ce_lock);
340
341 /*
342 * This function must be called only if there is an incomplete
343 * scatter-gather transfer (before index register is updated)
344 * that needs to be cleaned up.
345 */
346 if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
347 return;
348
349 if (WARN_ON_ONCE(src_ring->write_index ==
350 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
351 return;
352
353 src_ring->write_index--;
354 src_ring->write_index &= src_ring->nentries_mask;
355
356 src_ring->per_transfer_context[src_ring->write_index] = NULL;
357}
358
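
__ath10k_ce_send_revert() steps the software write index back one slot after an aborted scatter-gather post; because nentries is a power of two, masking with nentries_mask makes the unsigned decrement wrap correctly even from slot 0. The arithmetic in isolation:

    /* nentries must be a power of two, so mask == nentries - 1 */
    static unsigned int ring_step_back(unsigned int index, unsigned int mask)
    {
            return (index - 1) & mask;  /* 0 wraps to mask, the last slot */
    }
    /* ring_step_back(0, 7) == 7, ring_step_back(5, 7) == 4 */
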
332int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, 359int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
333 void *per_transfer_context, 360 void *per_transfer_context,
334 u32 buffer, 361 u32 buffer,
@@ -840,35 +867,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
840 867
841static int ath10k_ce_init_src_ring(struct ath10k *ar, 868static int ath10k_ce_init_src_ring(struct ath10k *ar,
842 unsigned int ce_id, 869 unsigned int ce_id,
843 struct ath10k_ce_pipe *ce_state,
844 const struct ce_attr *attr) 870 const struct ce_attr *attr)
845{ 871{
846 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 872 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
847 struct ath10k_ce_ring *src_ring; 873 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
848 unsigned int nentries = attr->src_nentries; 874 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
849 unsigned int ce_nbytes; 875 u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
850 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
851 dma_addr_t base_addr;
852 char *ptr;
853
854 nentries = roundup_pow_of_two(nentries);
855
856 if (ce_state->src_ring) {
857 WARN_ON(ce_state->src_ring->nentries != nentries);
858 return 0;
859 }
860
861 ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
862 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
863 if (ptr == NULL)
864 return -ENOMEM;
865 876
866 ce_state->src_ring = (struct ath10k_ce_ring *)ptr; 877 nentries = roundup_pow_of_two(attr->src_nentries);
867 src_ring = ce_state->src_ring;
868 878
869 ptr += sizeof(struct ath10k_ce_ring); 879 memset(src_ring->per_transfer_context, 0,
870 src_ring->nentries = nentries; 880 nentries * sizeof(*src_ring->per_transfer_context));
871 src_ring->nentries_mask = nentries - 1;
872 881
873 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); 882 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
874 src_ring->sw_index &= src_ring->nentries_mask; 883 src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +887,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
878 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); 887 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
879 src_ring->write_index &= src_ring->nentries_mask; 888 src_ring->write_index &= src_ring->nentries_mask;
880 889
881 src_ring->per_transfer_context = (void **)ptr; 890 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
891 src_ring->base_addr_ce_space);
892 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
893 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
894 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
895 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
896 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
897
898 ath10k_dbg(ATH10K_DBG_BOOT,
899 "boot init ce src ring id %d entries %d base_addr %p\n",
900 ce_id, nentries, src_ring->base_addr_owner_space);
901
902 return 0;
903}
904
905static int ath10k_ce_init_dest_ring(struct ath10k *ar,
906 unsigned int ce_id,
907 const struct ce_attr *attr)
908{
909 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
910 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
911 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
912 u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
913
914 nentries = roundup_pow_of_two(attr->dest_nentries);
915
916 memset(dest_ring->per_transfer_context, 0,
917 nentries * sizeof(*dest_ring->per_transfer_context));
918
919 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
920 dest_ring->sw_index &= dest_ring->nentries_mask;
921 dest_ring->write_index =
922 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
923 dest_ring->write_index &= dest_ring->nentries_mask;
924
925 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
926 dest_ring->base_addr_ce_space);
927 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
928 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
929 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
930 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
931
932 ath10k_dbg(ATH10K_DBG_BOOT,
933 "boot ce dest ring id %d entries %d base_addr %p\n",
934 ce_id, nentries, dest_ring->base_addr_owner_space);
935
936 return 0;
937}
938
939static struct ath10k_ce_ring *
940ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
941 const struct ce_attr *attr)
942{
943 struct ath10k_ce_ring *src_ring;
944 u32 nentries = attr->src_nentries;
945 dma_addr_t base_addr;
946
947 nentries = roundup_pow_of_two(nentries);
948
949 src_ring = kzalloc(sizeof(*src_ring) +
950 (nentries *
951 sizeof(*src_ring->per_transfer_context)),
952 GFP_KERNEL);
953 if (src_ring == NULL)
954 return ERR_PTR(-ENOMEM);
955
956 src_ring->nentries = nentries;
957 src_ring->nentries_mask = nentries - 1;
882 958
883 /* 959 /*
884 * Legacy platforms that do not support cache 960 * Legacy platforms that do not support cache
885 * coherent DMA are unsupported 961 * coherent DMA are unsupported
886 */ 962 */
887 src_ring->base_addr_owner_space_unaligned = 963 src_ring->base_addr_owner_space_unaligned =
888 pci_alloc_consistent(ar_pci->pdev, 964 dma_alloc_coherent(ar->dev,
889 (nentries * sizeof(struct ce_desc) + 965 (nentries * sizeof(struct ce_desc) +
890 CE_DESC_RING_ALIGN), 966 CE_DESC_RING_ALIGN),
891 &base_addr); 967 &base_addr, GFP_KERNEL);
892 if (!src_ring->base_addr_owner_space_unaligned) { 968 if (!src_ring->base_addr_owner_space_unaligned) {
893 kfree(ce_state->src_ring); 969 kfree(src_ring);
894 ce_state->src_ring = NULL; 970 return ERR_PTR(-ENOMEM);
895 return -ENOMEM;
896 } 971 }
897 972
898 src_ring->base_addr_ce_space_unaligned = base_addr; 973 src_ring->base_addr_ce_space_unaligned = base_addr;
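
The allocation also migrates from the legacy pci_alloc_consistent() wrapper to the generic DMA API: dma_alloc_coherent() takes the struct device and an explicit GFP mask (the PCI wrapper hard-coded GFP_ATOMIC) and pairs with dma_free_coherent(). The pairing sketched on its own:

    #include <linux/dma-mapping.h>

    static void *ring_alloc(struct device *dev, size_t bytes, dma_addr_t *bus)
    {
            /* coherent memory: CPU and device observe each other's writes
             * without explicit cache maintenance */
            return dma_alloc_coherent(dev, bytes, bus, GFP_KERNEL);
    }

    static void ring_free(struct device *dev, size_t bytes,
                          void *cpu_addr, dma_addr_t bus)
    {
            dma_free_coherent(dev, bytes, cpu_addr, bus);
    }
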
@@ -912,88 +987,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
912 kmalloc((nentries * sizeof(struct ce_desc) + 987 kmalloc((nentries * sizeof(struct ce_desc) +
913 CE_DESC_RING_ALIGN), GFP_KERNEL); 988 CE_DESC_RING_ALIGN), GFP_KERNEL);
914 if (!src_ring->shadow_base_unaligned) { 989 if (!src_ring->shadow_base_unaligned) {
915 pci_free_consistent(ar_pci->pdev, 990 dma_free_coherent(ar->dev,
916 (nentries * sizeof(struct ce_desc) + 991 (nentries * sizeof(struct ce_desc) +
917 CE_DESC_RING_ALIGN), 992 CE_DESC_RING_ALIGN),
918 src_ring->base_addr_owner_space, 993 src_ring->base_addr_owner_space,
919 src_ring->base_addr_ce_space); 994 src_ring->base_addr_ce_space);
920 kfree(ce_state->src_ring); 995 kfree(src_ring);
921 ce_state->src_ring = NULL; 996 return ERR_PTR(-ENOMEM);
922 return -ENOMEM;
923 } 997 }
924 998
925 src_ring->shadow_base = PTR_ALIGN( 999 src_ring->shadow_base = PTR_ALIGN(
926 src_ring->shadow_base_unaligned, 1000 src_ring->shadow_base_unaligned,
927 CE_DESC_RING_ALIGN); 1001 CE_DESC_RING_ALIGN);
928 1002
929 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 1003 return src_ring;
930 src_ring->base_addr_ce_space);
931 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
932 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
933 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
934 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
935 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
936
937 ath10k_dbg(ATH10K_DBG_BOOT,
938 "boot ce src ring id %d entries %d base_addr %p\n",
939 ce_id, nentries, src_ring->base_addr_owner_space);
940
941 return 0;
942} 1004}
943 1005
944static int ath10k_ce_init_dest_ring(struct ath10k *ar, 1006static struct ath10k_ce_ring *
945 unsigned int ce_id, 1007ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
946 struct ath10k_ce_pipe *ce_state, 1008 const struct ce_attr *attr)
947 const struct ce_attr *attr)
948{ 1009{
949 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
950 struct ath10k_ce_ring *dest_ring; 1010 struct ath10k_ce_ring *dest_ring;
951 unsigned int nentries = attr->dest_nentries; 1011 u32 nentries;
952 unsigned int ce_nbytes;
953 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
954 dma_addr_t base_addr; 1012 dma_addr_t base_addr;
955 char *ptr;
956 1013
957 nentries = roundup_pow_of_two(nentries); 1014 nentries = roundup_pow_of_two(attr->dest_nentries);
958 1015
959 if (ce_state->dest_ring) { 1016 dest_ring = kzalloc(sizeof(*dest_ring) +
960 WARN_ON(ce_state->dest_ring->nentries != nentries); 1017 (nentries *
961 return 0; 1018 sizeof(*dest_ring->per_transfer_context)),
962 } 1019 GFP_KERNEL);
963 1020 if (dest_ring == NULL)
964 ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *)); 1021 return ERR_PTR(-ENOMEM);
965 ptr = kzalloc(ce_nbytes, GFP_KERNEL);
966 if (ptr == NULL)
967 return -ENOMEM;
968 1022
969 ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
970 dest_ring = ce_state->dest_ring;
971
972 ptr += sizeof(struct ath10k_ce_ring);
973 dest_ring->nentries = nentries; 1023 dest_ring->nentries = nentries;
974 dest_ring->nentries_mask = nentries - 1; 1024 dest_ring->nentries_mask = nentries - 1;
975 1025
976 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
977 dest_ring->sw_index &= dest_ring->nentries_mask;
978 dest_ring->write_index =
979 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
980 dest_ring->write_index &= dest_ring->nentries_mask;
981
982 dest_ring->per_transfer_context = (void **)ptr;
983
984 /* 1026 /*
985 * Legacy platforms that do not support cache 1027 * Legacy platforms that do not support cache
986 * coherent DMA are unsupported 1028 * coherent DMA are unsupported
987 */ 1029 */
988 dest_ring->base_addr_owner_space_unaligned = 1030 dest_ring->base_addr_owner_space_unaligned =
989 pci_alloc_consistent(ar_pci->pdev, 1031 dma_alloc_coherent(ar->dev,
990 (nentries * sizeof(struct ce_desc) + 1032 (nentries * sizeof(struct ce_desc) +
991 CE_DESC_RING_ALIGN), 1033 CE_DESC_RING_ALIGN),
992 &base_addr); 1034 &base_addr, GFP_KERNEL);
993 if (!dest_ring->base_addr_owner_space_unaligned) { 1035 if (!dest_ring->base_addr_owner_space_unaligned) {
994 kfree(ce_state->dest_ring); 1036 kfree(dest_ring);
995 ce_state->dest_ring = NULL; 1037 return ERR_PTR(-ENOMEM);
996 return -ENOMEM;
997 } 1038 }
998 1039
999 dest_ring->base_addr_ce_space_unaligned = base_addr; 1040 dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1053,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1012 dest_ring->base_addr_ce_space_unaligned, 1053 dest_ring->base_addr_ce_space_unaligned,
1013 CE_DESC_RING_ALIGN); 1054 CE_DESC_RING_ALIGN);
1014 1055
1015 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 1056 return dest_ring;
1016 dest_ring->base_addr_ce_space);
1017 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1018 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1019 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1020 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1021
1022 ath10k_dbg(ATH10K_DBG_BOOT,
1023 "boot ce dest ring id %d entries %d base_addr %p\n",
1024 ce_id, nentries, dest_ring->base_addr_owner_space);
1025
1026 return 0;
1027}
1028
1029static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
1030 unsigned int ce_id,
1031 const struct ce_attr *attr)
1032{
1033 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1034 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1035 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1036
1037 spin_lock_bh(&ar_pci->ce_lock);
1038
1039 ce_state->ar = ar;
1040 ce_state->id = ce_id;
1041 ce_state->ctrl_addr = ctrl_addr;
1042 ce_state->attr_flags = attr->flags;
1043 ce_state->src_sz_max = attr->src_sz_max;
1044
1045 spin_unlock_bh(&ar_pci->ce_lock);
1046
1047 return ce_state;
1048} 1057}
1049 1058
1050/* 1059/*
@@ -1054,11 +1063,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
1054 * initialization. It may be that only one side or the other is 1063 * initialization. It may be that only one side or the other is
1055 * initialized by software/firmware. 1064 * initialized by software/firmware.
1056 */ 1065 */
1057struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, 1066int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1058 unsigned int ce_id, 1067 const struct ce_attr *attr)
1059 const struct ce_attr *attr)
1060{ 1068{
1061 struct ath10k_ce_pipe *ce_state; 1069 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1070 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1062 int ret; 1071 int ret;
1063 1072
1064 /* 1073 /*
@@ -1074,64 +1083,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
1074 1083
1075 ret = ath10k_pci_wake(ar); 1084 ret = ath10k_pci_wake(ar);
1076 if (ret) 1085 if (ret)
1077 return NULL; 1086 return ret;
1078 1087
1079 ce_state = ath10k_ce_init_state(ar, ce_id, attr); 1088 spin_lock_bh(&ar_pci->ce_lock);
1080 if (!ce_state) { 1089 ce_state->ar = ar;
1081 ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); 1090 ce_state->id = ce_id;
1082 goto out; 1091 ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
1083 } 1092 ce_state->attr_flags = attr->flags;
1093 ce_state->src_sz_max = attr->src_sz_max;
1094 spin_unlock_bh(&ar_pci->ce_lock);
1084 1095
1085 if (attr->src_nentries) { 1096 if (attr->src_nentries) {
1086 ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr); 1097 ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
1087 if (ret) { 1098 if (ret) {
1088 ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", 1099 ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
1089 ce_id, ret); 1100 ce_id, ret);
1090 ath10k_ce_deinit(ce_state);
1091 ce_state = NULL;
1092 goto out; 1101 goto out;
1093 } 1102 }
1094 } 1103 }
1095 1104
1096 if (attr->dest_nentries) { 1105 if (attr->dest_nentries) {
1097 ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr); 1106 ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
1098 if (ret) { 1107 if (ret) {
1099 ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", 1108 ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
1100 ce_id, ret); 1109 ce_id, ret);
1101 ath10k_ce_deinit(ce_state);
1102 ce_state = NULL;
1103 goto out; 1110 goto out;
1104 } 1111 }
1105 } 1112 }
1106 1113
1107out: 1114out:
1108 ath10k_pci_sleep(ar); 1115 ath10k_pci_sleep(ar);
1109 return ce_state; 1116 return ret;
1110} 1117}
1111 1118
1112void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state) 1119static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
1120{
1121 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1122
1123 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
1124 ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
1125 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
1126 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
1127}
1128
1129static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
1130{
1131 u32 ctrl_addr = ath10k_ce_base_address(ce_id);
1132
1133 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
1134 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
1135 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
1136}
1137
1138void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
1139{
1140 int ret;
1141
1142 ret = ath10k_pci_wake(ar);
1143 if (ret)
1144 return;
1145
1146 ath10k_ce_deinit_src_ring(ar, ce_id);
1147 ath10k_ce_deinit_dest_ring(ar, ce_id);
1148
1149 ath10k_pci_sleep(ar);
1150}
1151
1152int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1153 const struct ce_attr *attr)
1154{
1155 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1156 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1157 int ret;
1158
1159 if (attr->src_nentries) {
1160 ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
1161 if (IS_ERR(ce_state->src_ring)) {
1162 ret = PTR_ERR(ce_state->src_ring);
1163 ath10k_err("failed to allocate copy engine source ring %d: %d\n",
1164 ce_id, ret);
1165 ce_state->src_ring = NULL;
1166 return ret;
1167 }
1168 }
1169
1170 if (attr->dest_nentries) {
1171 ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
1172 attr);
1173 if (IS_ERR(ce_state->dest_ring)) {
1174 ret = PTR_ERR(ce_state->dest_ring);
1175 ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
1176 ce_id, ret);
1177 ce_state->dest_ring = NULL;
1178 return ret;
1179 }
1180 }
1181
1182 return 0;
1183}
1184
1185void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1113{ 1186{
1114 struct ath10k *ar = ce_state->ar;
1115 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1187 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1188 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
1116 1189
1117 if (ce_state->src_ring) { 1190 if (ce_state->src_ring) {
1118 kfree(ce_state->src_ring->shadow_base_unaligned); 1191 kfree(ce_state->src_ring->shadow_base_unaligned);
1119 pci_free_consistent(ar_pci->pdev, 1192 dma_free_coherent(ar->dev,
1120 (ce_state->src_ring->nentries * 1193 (ce_state->src_ring->nentries *
1121 sizeof(struct ce_desc) + 1194 sizeof(struct ce_desc) +
1122 CE_DESC_RING_ALIGN), 1195 CE_DESC_RING_ALIGN),
1123 ce_state->src_ring->base_addr_owner_space, 1196 ce_state->src_ring->base_addr_owner_space,
1124 ce_state->src_ring->base_addr_ce_space); 1197 ce_state->src_ring->base_addr_ce_space);
1125 kfree(ce_state->src_ring); 1198 kfree(ce_state->src_ring);
1126 } 1199 }
1127 1200
1128 if (ce_state->dest_ring) { 1201 if (ce_state->dest_ring) {
1129 pci_free_consistent(ar_pci->pdev, 1202 dma_free_coherent(ar->dev,
1130 (ce_state->dest_ring->nentries * 1203 (ce_state->dest_ring->nentries *
1131 sizeof(struct ce_desc) + 1204 sizeof(struct ce_desc) +
1132 CE_DESC_RING_ALIGN), 1205 CE_DESC_RING_ALIGN),
1133 ce_state->dest_ring->base_addr_owner_space, 1206 ce_state->dest_ring->base_addr_owner_space,
1134 ce_state->dest_ring->base_addr_ce_space); 1207 ce_state->dest_ring->base_addr_ce_space);
1135 kfree(ce_state->dest_ring); 1208 kfree(ce_state->dest_ring);
1136 } 1209 }
1137 1210
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 8eb7f99ed992..7a5a36fc59c1 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
104 void *shadow_base_unaligned; 104 void *shadow_base_unaligned;
105 struct ce_desc *shadow_base; 105 struct ce_desc *shadow_base;
106 106
107 void **per_transfer_context; 107 /* keep last */
108 void *per_transfer_context[0];
108}; 109};
109 110
110struct ath10k_ce_pipe { 111struct ath10k_ce_pipe {
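
per_transfer_context becomes a flexible array member at the tail of struct ath10k_ce_ring (hence the "keep last" comment), so the ring header and its per-slot pointers come from the single kzalloc(sizeof(*ring) + nentries * sizeof(ptr)) seen in ath10k_ce_alloc_src_ring() above. The idiom in isolation; modern C spells the member [] rather than [0]:

    #include <stdlib.h>

    struct ring {
            unsigned int nentries;
            void *slot[];   /* flexible array member: must stay last */
    };

    static struct ring *ring_alloc(unsigned int nentries)
    {
            /* one allocation covers the header and every slot pointer */
            return calloc(1, sizeof(struct ring) + nentries * sizeof(void *));
    }
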
@@ -159,6 +160,8 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
159 unsigned int transfer_id, 160 unsigned int transfer_id,
160 unsigned int flags); 161 unsigned int flags);
161 162
163void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
164
162void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, 165void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
163 void (*send_cb)(struct ath10k_ce_pipe *), 166 void (*send_cb)(struct ath10k_ce_pipe *),
164 int disable_interrupts); 167 int disable_interrupts);
@@ -210,10 +213,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
210 213
211/*==================CE Engine Initialization=======================*/ 214/*==================CE Engine Initialization=======================*/
212 215
213/* Initialize an instance of a CE */ 216int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
214struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, 217 const struct ce_attr *attr);
215 unsigned int ce_id, 218void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
216 const struct ce_attr *attr); 219int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
220 const struct ce_attr *attr);
221void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
217 222
218/*==================CE Engine Shutdown=======================*/ 223/*==================CE Engine Shutdown=======================*/
219/* 224/*
@@ -236,8 +241,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
236 unsigned int *nbytesp, 241 unsigned int *nbytesp,
237 unsigned int *transfer_idp); 242 unsigned int *transfer_idp);
238 243
239void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
240
241/*==================CE Interrupt Handlers====================*/ 244/*==================CE Interrupt Handlers====================*/
242void ath10k_ce_per_engine_service_any(struct ath10k *ar); 245void ath10k_ce_per_engine_service_any(struct ath10k *ar);
243void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); 246void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ebc5fc2ede75..82017f56e661 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -58,36 +58,6 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
58 complete(&ar->target_suspend); 58 complete(&ar->target_suspend);
59} 59}
60 60
61static int ath10k_init_connect_htc(struct ath10k *ar)
62{
63 int status;
64
65 status = ath10k_wmi_connect_htc_service(ar);
66 if (status)
67 goto conn_fail;
68
69 /* Start HTC */
70 status = ath10k_htc_start(&ar->htc);
71 if (status)
72 goto conn_fail;
73
74 /* Wait for WMI event to be ready */
75 status = ath10k_wmi_wait_for_service_ready(ar);
76 if (status <= 0) {
77 ath10k_warn("wmi service ready event not received");
78 status = -ETIMEDOUT;
79 goto timeout;
80 }
81
82 ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
83 return 0;
84
85timeout:
86 ath10k_htc_stop(&ar->htc);
87conn_fail:
88 return status;
89}
90
91static int ath10k_init_configure_target(struct ath10k *ar) 61static int ath10k_init_configure_target(struct ath10k *ar)
92{ 62{
93 u32 param_host; 63 u32 param_host;
@@ -249,30 +219,40 @@ exit:
249 219
250static int ath10k_download_and_run_otp(struct ath10k *ar) 220static int ath10k_download_and_run_otp(struct ath10k *ar)
251{ 221{
252 u32 address = ar->hw_params.patch_load_addr; 222 u32 result, address = ar->hw_params.patch_load_addr;
253 u32 exec_param;
254 int ret; 223 int ret;
255 224
256 /* OTP is optional */ 225 /* OTP is optional */
257 226
258 if (!ar->otp_data || !ar->otp_len) 227 if (!ar->otp_data || !ar->otp_len) {
228 ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
229 ar->otp_data, ar->otp_len);
259 return 0; 230 return 0;
231 }
232
233 ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
234 address, ar->otp_len);
260 235
261 ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); 236 ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
262 if (ret) { 237 if (ret) {
263 ath10k_err("could not write otp (%d)\n", ret); 238 ath10k_err("could not write otp (%d)\n", ret);
264 goto exit; 239 return ret;
265 } 240 }
266 241
267 exec_param = 0; 242 ret = ath10k_bmi_execute(ar, address, 0, &result);
268 ret = ath10k_bmi_execute(ar, address, &exec_param);
269 if (ret) { 243 if (ret) {
270 ath10k_err("could not execute otp (%d)\n", ret); 244 ath10k_err("could not execute otp (%d)\n", ret);
271 goto exit; 245 return ret;
272 } 246 }
273 247
274exit: 248 ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
275 return ret; 249
250 if (result != 0) {
251 ath10k_err("otp calibration failed: %d", result);
252 return -EINVAL;
253 }
254
255 return 0;
276} 256}
277 257
278static int ath10k_download_fw(struct ath10k *ar) 258static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +369,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
389 /* first fetch the firmware file (firmware-*.bin) */ 369 /* first fetch the firmware file (firmware-*.bin) */
390 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); 370 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
391 if (IS_ERR(ar->firmware)) { 371 if (IS_ERR(ar->firmware)) {
392 ath10k_err("Could not fetch firmware file '%s': %ld\n", 372 ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
393 name, PTR_ERR(ar->firmware)); 373 ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
394 return PTR_ERR(ar->firmware); 374 return PTR_ERR(ar->firmware);
395 } 375 }
396 376
@@ -401,14 +381,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
401 magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; 381 magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
402 382
403 if (len < magic_len) { 383 if (len < magic_len) {
404 ath10k_err("firmware image too small to contain magic: %zu\n", 384 ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
405 len); 385 ar->hw_params.fw.dir, name, len);
406 ret = -EINVAL; 386 ret = -EINVAL;
407 goto err; 387 goto err;
408 } 388 }
409 389
410 if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { 390 if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
411 ath10k_err("Invalid firmware magic\n"); 391 ath10k_err("invalid firmware magic\n");
412 ret = -EINVAL; 392 ret = -EINVAL;
413 goto err; 393 goto err;
414 } 394 }
@@ -430,7 +410,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
430 data += sizeof(*hdr); 410 data += sizeof(*hdr);
431 411
432 if (len < ie_len) { 412 if (len < ie_len) {
433 ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n", 413 ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
434 ie_id, len, ie_len); 414 ie_id, len, ie_len);
435 ret = -EINVAL; 415 ret = -EINVAL;
436 goto err; 416 goto err;
@@ -513,8 +493,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
513 } 493 }
514 494
515 if (!ar->firmware_data || !ar->firmware_len) { 495 if (!ar->firmware_data || !ar->firmware_len) {
516 ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n", 496 ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
517 name); 497 ar->hw_params.fw.dir, name);
518 ret = -ENOMEDIUM; 498 ret = -ENOMEDIUM;
519 goto err; 499 goto err;
520 } 500 }
@@ -531,7 +511,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
531 ar->hw_params.fw.board); 511 ar->hw_params.fw.board);
532 if (IS_ERR(ar->board)) { 512 if (IS_ERR(ar->board)) {
533 ret = PTR_ERR(ar->board); 513 ret = PTR_ERR(ar->board);
534 ath10k_err("could not fetch board data (%d)\n", ret); 514 ath10k_err("could not fetch board data '%s/%s' (%d)\n",
515 ar->hw_params.fw.dir, ar->hw_params.fw.board,
516 ret);
535 goto err; 517 goto err;
536 } 518 }
537 519
@@ -549,19 +531,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
549{ 531{
550 int ret; 532 int ret;
551 533
534 ar->fw_api = 2;
535 ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
536
552 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); 537 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
553 if (ret == 0) { 538 if (ret == 0)
554 ar->fw_api = 2; 539 goto success;
555 goto out; 540
556 } 541 ar->fw_api = 1;
542 ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
557 543
558 ret = ath10k_core_fetch_firmware_api_1(ar); 544 ret = ath10k_core_fetch_firmware_api_1(ar);
559 if (ret) 545 if (ret)
560 return ret; 546 return ret;
561 547
562 ar->fw_api = 1; 548success:
563
564out:
565 ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); 549 ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
566 550
567 return 0; 551 return 0;
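
The fetch rework above sets ar->fw_api before each attempt, so the "trying fw api %d" debug line is accurate, and falls back from API 2 to API 1 through a shared success label. The same newest-first fallback, generalized into a loop (an illustrative sketch, not driver code):

#include <errno.h>
#include <stdio.h>

/* Pretend only API 1 firmware files exist on this system. */
static int fetch_api(int api)
{
        return api == 1 ? 0 : -ENOENT;
}

static int fetch_firmware(int *fw_api)
{
        static const int apis[] = { 2, 1 };     /* newest first */
        size_t i;
        int ret = -ENOENT;

        for (i = 0; i < sizeof(apis) / sizeof(apis[0]); i++) {
                *fw_api = apis[i];
                printf("trying fw api %d\n", *fw_api);
                ret = fetch_api(*fw_api);
                if (ret == 0)
                        break;
        }
        if (ret)
                return ret;

        printf("using fw api %d\n", *fw_api);
        return 0;
}

int main(void)
{
        int api;

        return fetch_firmware(&api) ? 1 : 0;
}
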
@@ -572,16 +556,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
572 int ret; 556 int ret;
573 557
574 ret = ath10k_download_board_data(ar); 558 ret = ath10k_download_board_data(ar);
575 if (ret) 559 if (ret) {
560 ath10k_err("failed to download board data: %d\n", ret);
576 return ret; 561 return ret;
562 }
577 563
578 ret = ath10k_download_and_run_otp(ar); 564 ret = ath10k_download_and_run_otp(ar);
579 if (ret) 565 if (ret) {
566 ath10k_err("failed to run otp: %d\n", ret);
580 return ret; 567 return ret;
568 }
581 569
582 ret = ath10k_download_fw(ar); 570 ret = ath10k_download_fw(ar);
583 if (ret) 571 if (ret) {
572 ath10k_err("failed to download firmware: %d\n", ret);
584 return ret; 573 return ret;
574 }
585 575
586 return ret; 576 return ret;
587} 577}
@@ -660,8 +650,9 @@ static void ath10k_core_restart(struct work_struct *work)
660 650
661 switch (ar->state) { 651 switch (ar->state) {
662 case ATH10K_STATE_ON: 652 case ATH10K_STATE_ON:
663 ath10k_halt(ar);
664 ar->state = ATH10K_STATE_RESTARTING; 653 ar->state = ATH10K_STATE_RESTARTING;
654 del_timer_sync(&ar->scan.timeout);
655 ath10k_reset_scan((unsigned long)ar);
665 ieee80211_restart_hw(ar->hw); 656 ieee80211_restart_hw(ar->hw);
666 break; 657 break;
667 case ATH10K_STATE_OFF: 658 case ATH10K_STATE_OFF:
@@ -670,6 +661,8 @@ static void ath10k_core_restart(struct work_struct *work)
670 ath10k_warn("cannot restart a device that hasn't been started\n"); 661 ath10k_warn("cannot restart a device that hasn't been started\n");
671 break; 662 break;
672 case ATH10K_STATE_RESTARTING: 663 case ATH10K_STATE_RESTARTING:
664 /* hw restart might be requested from multiple places */
665 break;
673 case ATH10K_STATE_RESTARTED: 666 case ATH10K_STATE_RESTARTED:
674 ar->state = ATH10K_STATE_WEDGED; 667 ar->state = ATH10K_STATE_WEDGED;
675 /* fall through */ 668 /* fall through */
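
The restart handler above gains an explicit no-op arm for ATH10K_STATE_RESTARTING because, as the new comment notes, a hardware restart may be requested from several places while one is already in flight. A toy model of that idempotent state handling (states and effects illustrative only):

#include <stdio.h>

enum dev_state { STATE_OFF, STATE_ON, STATE_RESTARTING, STATE_RESTARTED,
                 STATE_WEDGED };

/* A second restart request arriving while one is in flight must be
 * swallowed, not re-armed. */
static void handle_restart(enum dev_state *state)
{
        switch (*state) {
        case STATE_ON:
                *state = STATE_RESTARTING;  /* kick off the real restart */
                break;
        case STATE_RESTARTING:
                /* restart may be requested from multiple places: no-op */
                break;
        case STATE_RESTARTED:
                *state = STATE_WEDGED;      /* restart failed mid-restart */
                break;
        default:
                break;
        }
}

int main(void)
{
        enum dev_state s = STATE_ON;

        handle_restart(&s);   /* ON -> RESTARTING */
        handle_restart(&s);   /* duplicate request, ignored */
        printf("state %d\n", s);
        return 0;
}
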
@@ -681,70 +674,6 @@ static void ath10k_core_restart(struct work_struct *work)
681 mutex_unlock(&ar->conf_mutex); 674 mutex_unlock(&ar->conf_mutex);
682} 675}
683 676
684struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
685 const struct ath10k_hif_ops *hif_ops)
686{
687 struct ath10k *ar;
688
689 ar = ath10k_mac_create();
690 if (!ar)
691 return NULL;
692
693 ar->ath_common.priv = ar;
694 ar->ath_common.hw = ar->hw;
695
696 ar->p2p = !!ath10k_p2p;
697 ar->dev = dev;
698
699 ar->hif.priv = hif_priv;
700 ar->hif.ops = hif_ops;
701
702 init_completion(&ar->scan.started);
703 init_completion(&ar->scan.completed);
704 init_completion(&ar->scan.on_channel);
705 init_completion(&ar->target_suspend);
706
707 init_completion(&ar->install_key_done);
708 init_completion(&ar->vdev_setup_done);
709
710 setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
711
712 ar->workqueue = create_singlethread_workqueue("ath10k_wq");
713 if (!ar->workqueue)
714 goto err_wq;
715
716 mutex_init(&ar->conf_mutex);
717 spin_lock_init(&ar->data_lock);
718
719 INIT_LIST_HEAD(&ar->peers);
720 init_waitqueue_head(&ar->peer_mapping_wq);
721
722 init_completion(&ar->offchan_tx_completed);
723 INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
724 skb_queue_head_init(&ar->offchan_tx_queue);
725
726 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
727 skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
728
729 INIT_WORK(&ar->restart_work, ath10k_core_restart);
730
731 return ar;
732
733err_wq:
734 ath10k_mac_destroy(ar);
735 return NULL;
736}
737EXPORT_SYMBOL(ath10k_core_create);
738
739void ath10k_core_destroy(struct ath10k *ar)
740{
741 flush_workqueue(ar->workqueue);
742 destroy_workqueue(ar->workqueue);
743
744 ath10k_mac_destroy(ar);
745}
746EXPORT_SYMBOL(ath10k_core_destroy);
747
748int ath10k_core_start(struct ath10k *ar) 677int ath10k_core_start(struct ath10k *ar)
749{ 678{
750 int status; 679 int status;
@@ -785,10 +714,28 @@ int ath10k_core_start(struct ath10k *ar)
785 goto err; 714 goto err;
786 } 715 }
787 716
717 status = ath10k_htt_init(ar);
718 if (status) {
719 ath10k_err("failed to init htt: %d\n", status);
720 goto err_wmi_detach;
721 }
722
723 status = ath10k_htt_tx_alloc(&ar->htt);
724 if (status) {
725 ath10k_err("failed to alloc htt tx: %d\n", status);
726 goto err_wmi_detach;
727 }
728
729 status = ath10k_htt_rx_alloc(&ar->htt);
730 if (status) {
731 ath10k_err("failed to alloc htt rx: %d\n", status);
732 goto err_htt_tx_detach;
733 }
734
788 status = ath10k_hif_start(ar); 735 status = ath10k_hif_start(ar);
789 if (status) { 736 if (status) {
790 ath10k_err("could not start HIF: %d\n", status); 737 ath10k_err("could not start HIF: %d\n", status);
791 goto err_wmi_detach; 738 goto err_htt_rx_detach;
792 } 739 }
793 740
794 status = ath10k_htc_wait_target(&ar->htc); 741 status = ath10k_htc_wait_target(&ar->htc);
@@ -797,15 +744,30 @@ int ath10k_core_start(struct ath10k *ar)
797 goto err_hif_stop; 744 goto err_hif_stop;
798 } 745 }
799 746
800 status = ath10k_htt_attach(ar); 747 status = ath10k_htt_connect(&ar->htt);
801 if (status) { 748 if (status) {
802 ath10k_err("could not attach htt (%d)\n", status); 749 ath10k_err("failed to connect htt (%d)\n", status);
803 goto err_hif_stop; 750 goto err_hif_stop;
804 } 751 }
805 752
806 status = ath10k_init_connect_htc(ar); 753 status = ath10k_wmi_connect(ar);
807 if (status) 754 if (status) {
808 goto err_htt_detach; 755 ath10k_err("could not connect wmi: %d\n", status);
756 goto err_hif_stop;
757 }
758
759 status = ath10k_htc_start(&ar->htc);
760 if (status) {
761 ath10k_err("failed to start htc: %d\n", status);
762 goto err_hif_stop;
763 }
764
765 status = ath10k_wmi_wait_for_service_ready(ar);
766 if (status <= 0) {
767 ath10k_warn("wmi service ready event not received");
768 status = -ETIMEDOUT;
769 goto err_htc_stop;
770 }
809 771
810 ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n", 772 ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
811 ar->hw->wiphy->fw_version); 773 ar->hw->wiphy->fw_version);
@@ -813,31 +775,36 @@ int ath10k_core_start(struct ath10k *ar)
813 status = ath10k_wmi_cmd_init(ar); 775 status = ath10k_wmi_cmd_init(ar);
814 if (status) { 776 if (status) {
815 ath10k_err("could not send WMI init command (%d)\n", status); 777 ath10k_err("could not send WMI init command (%d)\n", status);
816 goto err_disconnect_htc; 778 goto err_htc_stop;
817 } 779 }
818 780
819 status = ath10k_wmi_wait_for_unified_ready(ar); 781 status = ath10k_wmi_wait_for_unified_ready(ar);
820 if (status <= 0) { 782 if (status <= 0) {
821 ath10k_err("wmi unified ready event not received\n"); 783 ath10k_err("wmi unified ready event not received\n");
822 status = -ETIMEDOUT; 784 status = -ETIMEDOUT;
823 goto err_disconnect_htc; 785 goto err_htc_stop;
824 } 786 }
825 787
826 status = ath10k_htt_attach_target(&ar->htt); 788 status = ath10k_htt_setup(&ar->htt);
827 if (status) 789 if (status) {
828 goto err_disconnect_htc; 790 ath10k_err("failed to setup htt: %d\n", status);
791 goto err_htc_stop;
792 }
829 793
830 status = ath10k_debug_start(ar); 794 status = ath10k_debug_start(ar);
831 if (status) 795 if (status)
832 goto err_disconnect_htc; 796 goto err_htc_stop;
833 797
834 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; 798 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
835 INIT_LIST_HEAD(&ar->arvifs); 799 INIT_LIST_HEAD(&ar->arvifs);
836 800
837 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) 801 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
838 ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n", 802 ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
839 ar->hw_params.name, ar->target_version, 803 ar->hw_params.name,
840 ar->hw->wiphy->fw_version, ar->fw_api, 804 ar->target_version,
805 ar->chip_id,
806 ar->hw->wiphy->fw_version,
807 ar->fw_api,
841 ar->htt.target_version_major, 808 ar->htt.target_version_major,
842 ar->htt.target_version_minor); 809 ar->htt.target_version_minor);
843 810
@@ -845,12 +812,14 @@ int ath10k_core_start(struct ath10k *ar)
845 812
846 return 0; 813 return 0;
847 814
848err_disconnect_htc: 815err_htc_stop:
849 ath10k_htc_stop(&ar->htc); 816 ath10k_htc_stop(&ar->htc);
850err_htt_detach:
851 ath10k_htt_detach(&ar->htt);
852err_hif_stop: 817err_hif_stop:
853 ath10k_hif_stop(ar); 818 ath10k_hif_stop(ar);
819err_htt_rx_detach:
820 ath10k_htt_rx_free(&ar->htt);
821err_htt_tx_detach:
822 ath10k_htt_tx_free(&ar->htt);
854err_wmi_detach: 823err_wmi_detach:
855 ath10k_wmi_detach(ar); 824 ath10k_wmi_detach(ar);
856err: 825err:
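
The relabelled unwind ladder above (err_htc_stop, err_hif_stop, err_htt_rx_detach, err_htt_tx_detach, err_wmi_detach) follows the usual kernel pattern: each failure jumps to the label that releases everything acquired so far, in reverse order of setup. A self-contained sketch of the shape, with generic stand-in resources:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return 0; }
static void free_a(void) { puts("free a"); }
static void free_b(void) { puts("free b"); }

static int start(void)
{
        int ret;

        ret = init_a();
        if (ret)
                goto err;
        ret = init_b();
        if (ret)
                goto err_free_a;
        ret = init_c();
        if (ret)
                goto err_free_b;
        return 0;

        /* unwind strictly in reverse order of acquisition */
err_free_b:
        free_b();
err_free_a:
        free_a();
err:
        return ret;
}

int main(void)
{
        return start();
}
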
@@ -885,10 +854,14 @@ void ath10k_core_stop(struct ath10k *ar)
885 lockdep_assert_held(&ar->conf_mutex); 854 lockdep_assert_held(&ar->conf_mutex);
886 855
887 /* try to suspend target */ 856 /* try to suspend target */
888 ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); 857 if (ar->state != ATH10K_STATE_RESTARTING)
858 ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
859
889 ath10k_debug_stop(ar); 860 ath10k_debug_stop(ar);
890 ath10k_htc_stop(&ar->htc); 861 ath10k_htc_stop(&ar->htc);
891 ath10k_htt_detach(&ar->htt); 862 ath10k_hif_stop(ar);
863 ath10k_htt_tx_free(&ar->htt);
864 ath10k_htt_rx_free(&ar->htt);
892 ath10k_wmi_detach(ar); 865 ath10k_wmi_detach(ar);
893} 866}
894EXPORT_SYMBOL(ath10k_core_stop); 867EXPORT_SYMBOL(ath10k_core_stop);
@@ -980,22 +953,15 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
980 return 0; 953 return 0;
981} 954}
982 955
983int ath10k_core_register(struct ath10k *ar, u32 chip_id) 956static void ath10k_core_register_work(struct work_struct *work)
984{ 957{
958 struct ath10k *ar = container_of(work, struct ath10k, register_work);
985 int status; 959 int status;
986 960
987 ar->chip_id = chip_id;
988
989 status = ath10k_core_check_chip_id(ar);
990 if (status) {
991 ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
992 return status;
993 }
994
995 status = ath10k_core_probe_fw(ar); 961 status = ath10k_core_probe_fw(ar);
996 if (status) { 962 if (status) {
997 ath10k_err("could not probe fw (%d)\n", status); 963 ath10k_err("could not probe fw (%d)\n", status);
998 return status; 964 goto err;
999 } 965 }
1000 966
1001 status = ath10k_mac_register(ar); 967 status = ath10k_mac_register(ar);
@@ -1010,18 +976,43 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id)
1010 goto err_unregister_mac; 976 goto err_unregister_mac;
1011 } 977 }
1012 978
1013 return 0; 979 set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
980 return;
1014 981
1015err_unregister_mac: 982err_unregister_mac:
1016 ath10k_mac_unregister(ar); 983 ath10k_mac_unregister(ar);
1017err_release_fw: 984err_release_fw:
1018 ath10k_core_free_firmware_files(ar); 985 ath10k_core_free_firmware_files(ar);
1019 return status; 986err:
987 device_release_driver(ar->dev);
988 return;
989}
990
991int ath10k_core_register(struct ath10k *ar, u32 chip_id)
992{
993 int status;
994
995 ar->chip_id = chip_id;
996
997 status = ath10k_core_check_chip_id(ar);
998 if (status) {
999 ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
1000 return status;
1001 }
1002
1003 queue_work(ar->workqueue, &ar->register_work);
1004
1005 return 0;
1020} 1006}
1021EXPORT_SYMBOL(ath10k_core_register); 1007EXPORT_SYMBOL(ath10k_core_register);
1022 1008
1023void ath10k_core_unregister(struct ath10k *ar) 1009void ath10k_core_unregister(struct ath10k *ar)
1024{ 1010{
1011 cancel_work_sync(&ar->register_work);
1012
1013 if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
1014 return;
1015
1025 /* We must unregister from mac80211 before we stop HTC and HIF. 1016 /* We must unregister from mac80211 before we stop HTC and HIF.
1026 * Otherwise we will fail to submit commands to FW and mac80211 will be 1017 * Otherwise we will fail to submit commands to FW and mac80211 will be
1027 * unhappy about callback failures. */ 1018 * unhappy about callback failures. */
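
Registration is now split: ath10k_core_register() only validates the chip id and queues register_work, while unregister first cancels any pending work and bails out unless the worker managed to set ATH10K_FLAG_CORE_REGISTERED. A single-threaded toy model of that ordering; the real code runs the work on ar->workqueue and uses cancel_work_sync():

#include <stdbool.h>
#include <stdio.h>

struct dev {
        bool work_pending;
        bool registered;
};

/* Deferred probe body: runs later, off the register() call path. */
static void register_work(struct dev *d)
{
        d->work_pending = false;
        d->registered = true;    /* probe + mac register succeeded */
}

static void dev_register(struct dev *d)
{
        d->work_pending = true;  /* queue_work() stand-in */
}

static void dev_unregister(struct dev *d)
{
        d->work_pending = false; /* cancel_work_sync() stand-in */
        if (!d->registered)
                return;          /* work never ran: nothing to undo */
        d->registered = false;
        puts("torn down");
}

int main(void)
{
        struct dev d = { false, false };

        dev_register(&d);
        register_work(&d);       /* workqueue runs the deferred probe */
        dev_unregister(&d);
        return 0;
}
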
@@ -1033,6 +1024,71 @@ void ath10k_core_unregister(struct ath10k *ar)
1033} 1024}
1034EXPORT_SYMBOL(ath10k_core_unregister); 1025EXPORT_SYMBOL(ath10k_core_unregister);
1035 1026
1027struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
1028 const struct ath10k_hif_ops *hif_ops)
1029{
1030 struct ath10k *ar;
1031
1032 ar = ath10k_mac_create();
1033 if (!ar)
1034 return NULL;
1035
1036 ar->ath_common.priv = ar;
1037 ar->ath_common.hw = ar->hw;
1038
1039 ar->p2p = !!ath10k_p2p;
1040 ar->dev = dev;
1041
1042 ar->hif.priv = hif_priv;
1043 ar->hif.ops = hif_ops;
1044
1045 init_completion(&ar->scan.started);
1046 init_completion(&ar->scan.completed);
1047 init_completion(&ar->scan.on_channel);
1048 init_completion(&ar->target_suspend);
1049
1050 init_completion(&ar->install_key_done);
1051 init_completion(&ar->vdev_setup_done);
1052
1053 setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
1054
1055 ar->workqueue = create_singlethread_workqueue("ath10k_wq");
1056 if (!ar->workqueue)
1057 goto err_wq;
1058
1059 mutex_init(&ar->conf_mutex);
1060 spin_lock_init(&ar->data_lock);
1061
1062 INIT_LIST_HEAD(&ar->peers);
1063 init_waitqueue_head(&ar->peer_mapping_wq);
1064
1065 init_completion(&ar->offchan_tx_completed);
1066 INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
1067 skb_queue_head_init(&ar->offchan_tx_queue);
1068
1069 INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
1070 skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
1071
1072 INIT_WORK(&ar->register_work, ath10k_core_register_work);
1073 INIT_WORK(&ar->restart_work, ath10k_core_restart);
1074
1075 return ar;
1076
1077err_wq:
1078 ath10k_mac_destroy(ar);
1079 return NULL;
1080}
1081EXPORT_SYMBOL(ath10k_core_create);
1082
1083void ath10k_core_destroy(struct ath10k *ar)
1084{
1085 flush_workqueue(ar->workqueue);
1086 destroy_workqueue(ar->workqueue);
1087
1088 ath10k_mac_destroy(ar);
1089}
1090EXPORT_SYMBOL(ath10k_core_destroy);
1091
1036MODULE_AUTHOR("Qualcomm Atheros"); 1092MODULE_AUTHOR("Qualcomm Atheros");
1037MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); 1093MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
1038MODULE_LICENSE("Dual BSD/GPL"); 1094MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0e71979d837c..68ceef61933d 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
119 u8 peer_macaddr[ETH_ALEN]; 119 u8 peer_macaddr[ETH_ALEN];
120 u32 peer_rssi; 120 u32 peer_rssi;
121 u32 peer_tx_rate; 121 u32 peer_tx_rate;
122 u32 peer_rx_rate; /* 10x only */
122}; 123};
123 124
124struct ath10k_target_stats { 125struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
130 u32 cycle_count; 131 u32 cycle_count;
131 u32 phy_err_count; 132 u32 phy_err_count;
132 u32 chan_tx_power; 133 u32 chan_tx_power;
134 u32 ack_rx_bad;
135 u32 rts_bad;
136 u32 rts_good;
137 u32 fcs_bad;
138 u32 no_beacons;
139 u32 mib_int_count;
133 140
134 /* PDEV TX stats */ 141 /* PDEV TX stats */
135 s32 comp_queued; 142 s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
260 u8 fixed_rate; 267 u8 fixed_rate;
261 u8 fixed_nss; 268 u8 fixed_nss;
262 u8 force_sgi; 269 u8 force_sgi;
270 bool use_cts_prot;
271 int num_legacy_stations;
263}; 272};
264 273
265struct ath10k_vif_iter { 274struct ath10k_vif_iter {
@@ -326,6 +335,7 @@ enum ath10k_dev_flags {
326 /* Indicates that ath10k device is during CAC phase of DFS */ 335 /* Indicates that ath10k device is during CAC phase of DFS */
327 ATH10K_CAC_RUNNING, 336 ATH10K_CAC_RUNNING,
328 ATH10K_FLAG_FIRST_BOOT_DONE, 337 ATH10K_FLAG_FIRST_BOOT_DONE,
338 ATH10K_FLAG_CORE_REGISTERED,
329}; 339};
330 340
331struct ath10k { 341struct ath10k {
@@ -419,13 +429,24 @@ struct ath10k {
419 struct cfg80211_chan_def chandef; 429 struct cfg80211_chan_def chandef;
420 430
421 int free_vdev_map; 431 int free_vdev_map;
432 bool promisc;
433 bool monitor;
422 int monitor_vdev_id; 434 int monitor_vdev_id;
423 bool monitor_enabled; 435 bool monitor_started;
424 bool monitor_present;
425 unsigned int filter_flags; 436 unsigned int filter_flags;
426 unsigned long dev_flags; 437 unsigned long dev_flags;
427 u32 dfs_block_radar_events; 438 u32 dfs_block_radar_events;
428 439
440 /* protected by conf_mutex */
441 bool radar_enabled;
442 int num_started_vdevs;
443
444 /* Protected by conf-mutex */
445 u8 supp_tx_chainmask;
446 u8 supp_rx_chainmask;
447 u8 cfg_tx_chainmask;
448 u8 cfg_rx_chainmask;
449
429 struct wmi_pdev_set_wmm_params_arg wmm_params; 450 struct wmi_pdev_set_wmm_params_arg wmm_params;
430 struct completion install_key_done; 451 struct completion install_key_done;
431 452
@@ -456,6 +477,7 @@ struct ath10k {
456 477
457 enum ath10k_state state; 478 enum ath10k_state state;
458 479
480 struct work_struct register_work;
459 struct work_struct restart_work; 481 struct work_struct restart_work;
460 482
461 /* cycle count is reported twice for each visited channel during scan. 483 /* cycle count is reported twice for each visited channel during scan.
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 6debd281350a..1b7ff4ba122c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
161 u8 *tmp = ev->data; 161 u8 *tmp = ev->data;
162 struct ath10k_target_stats *stats; 162 struct ath10k_target_stats *stats;
163 int num_pdev_stats, num_vdev_stats, num_peer_stats; 163 int num_pdev_stats, num_vdev_stats, num_peer_stats;
164 struct wmi_pdev_stats *ps; 164 struct wmi_pdev_stats_10x *ps;
165 int i; 165 int i;
166 166
167 spin_lock_bh(&ar->data_lock); 167 spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
173 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ 173 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
174 174
175 if (num_pdev_stats) { 175 if (num_pdev_stats) {
176 ps = (struct wmi_pdev_stats *)tmp; 176 ps = (struct wmi_pdev_stats_10x *)tmp;
177 177
178 stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); 178 stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
179 stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); 179 stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
228 stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); 228 stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
229 stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); 229 stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
230 230
231 tmp += sizeof(struct wmi_pdev_stats); 231 if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
232 ar->fw_features)) {
233 stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
234 stats->rts_bad = __le32_to_cpu(ps->rts_bad);
235 stats->rts_good = __le32_to_cpu(ps->rts_good);
236 stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
237 stats->no_beacons = __le32_to_cpu(ps->no_beacons);
238 stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
239 tmp += sizeof(struct wmi_pdev_stats_10x);
240 } else {
241 tmp += sizeof(struct wmi_pdev_stats_old);
242 }
232 } 243 }
233 244
234 /* 0 or max vdevs */ 245 /* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
243 } 254 }
244 255
245 if (num_peer_stats) { 256 if (num_peer_stats) {
246 struct wmi_peer_stats *peer_stats; 257 struct wmi_peer_stats_10x *peer_stats;
247 struct ath10k_peer_stat *s; 258 struct ath10k_peer_stat *s;
248 259
249 stats->peers = num_peer_stats; 260 stats->peers = num_peer_stats;
250 261
251 for (i = 0; i < num_peer_stats; i++) { 262 for (i = 0; i < num_peer_stats; i++) {
252 peer_stats = (struct wmi_peer_stats *)tmp; 263 peer_stats = (struct wmi_peer_stats_10x *)tmp;
253 s = &stats->peer_stat[i]; 264 s = &stats->peer_stat[i];
254 265
255 WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, 266 memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
256 s->peer_macaddr); 267 ETH_ALEN);
257 s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi); 268 s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
258 s->peer_tx_rate = 269 s->peer_tx_rate =
259 __le32_to_cpu(peer_stats->peer_tx_rate); 270 __le32_to_cpu(peer_stats->peer_tx_rate);
260 271 if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
261 tmp += sizeof(struct wmi_peer_stats); 272 ar->fw_features)) {
273 s->peer_rx_rate =
274 __le32_to_cpu(peer_stats->peer_rx_rate);
275 tmp += sizeof(struct wmi_peer_stats_10x);
276
277 } else {
278 tmp += sizeof(struct wmi_peer_stats_old);
279 }
262 } 280 }
263 } 281 }
264 282
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
272 struct ath10k *ar = file->private_data; 290 struct ath10k *ar = file->private_data;
273 struct ath10k_target_stats *fw_stats; 291 struct ath10k_target_stats *fw_stats;
274 char *buf = NULL; 292 char *buf = NULL;
275 unsigned int len = 0, buf_len = 2500; 293 unsigned int len = 0, buf_len = 8000;
276 ssize_t ret_cnt = 0; 294 ssize_t ret_cnt = 0;
277 long left; 295 long left;
278 int i; 296 int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
320 "Cycle count", fw_stats->cycle_count); 338 "Cycle count", fw_stats->cycle_count);
321 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 339 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
322 "PHY error count", fw_stats->phy_err_count); 340 "PHY error count", fw_stats->phy_err_count);
341 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
342 "RTS bad count", fw_stats->rts_bad);
343 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
344 "RTS good count", fw_stats->rts_good);
345 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
346 "FCS bad count", fw_stats->fcs_bad);
347 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
348 "No beacon count", fw_stats->no_beacons);
349 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
350 "MIB int count", fw_stats->mib_int_count);
323 351
324 len += scnprintf(buf + len, buf_len - len, "\n"); 352 len += scnprintf(buf + len, buf_len - len, "\n");
325 len += scnprintf(buf + len, buf_len - len, "%30s\n", 353 len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
411 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs); 439 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
412 440
413 len += scnprintf(buf + len, buf_len - len, "\n"); 441 len += scnprintf(buf + len, buf_len - len, "\n");
414 len += scnprintf(buf + len, buf_len - len, "%30s\n", 442 len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
415 "ath10k PEER stats"); 443 "ath10k PEER stats", fw_stats->peers);
416 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 444 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
417 "================="); 445 "=================");
418 446
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
425 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 453 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
426 "Peer TX rate", 454 "Peer TX rate",
427 fw_stats->peer_stat[i].peer_tx_rate); 455 fw_stats->peer_stat[i].peer_tx_rate);
456 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
457 "Peer RX rate",
458 fw_stats->peer_stat[i].peer_rx_rate);
428 len += scnprintf(buf + len, buf_len - len, "\n"); 459 len += scnprintf(buf + len, buf_len - len, "\n");
429 } 460 }
430 spin_unlock_bh(&ar->data_lock); 461 spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
451 char __user *user_buf, 482 char __user *user_buf,
452 size_t count, loff_t *ppos) 483 size_t count, loff_t *ppos)
453{ 484{
454 const char buf[] = "To simulate firmware crash write the keyword" 485 const char buf[] = "To simulate firmware crash write one of the"
455 " `crash` to this file.\nThis will force firmware" 486 " keywords to this file:\n `soft` - this will send"
456 " to report a crash to the host system.\n"; 487 " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
488 " supports that command.\n `hard` - this will send"
489 " to firmware command with illegal parameters"
490 " causing firmware crash.\n";
491
457 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 492 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
458} 493}
459 494
495/* Simulate firmware crash:
496 * 'soft': Call wmi command causing firmware hang. This firmware hang is
497 * recoverable by warm firmware reset.
498 * 'hard': Force firmware crash by setting any vdev parameter for not allowed
499 * vdev id. This is hard firmware crash because it is recoverable only by cold
500 * firmware reset.
501 */
460static ssize_t ath10k_write_simulate_fw_crash(struct file *file, 502static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
461 const char __user *user_buf, 503 const char __user *user_buf,
462 size_t count, loff_t *ppos) 504 size_t count, loff_t *ppos)
463{ 505{
464 struct ath10k *ar = file->private_data; 506 struct ath10k *ar = file->private_data;
465 char buf[32] = {}; 507 char buf[32];
466 int ret; 508 int ret;
467 509
468 mutex_lock(&ar->conf_mutex); 510 mutex_lock(&ar->conf_mutex);
469 511
470 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); 512 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
471 if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) { 513
472 ret = -EINVAL; 514 /* make sure that buf is null terminated */
473 goto exit; 515 buf[sizeof(buf) - 1] = 0;
474 }
475 516
476 if (ar->state != ATH10K_STATE_ON && 517 if (ar->state != ATH10K_STATE_ON &&
477 ar->state != ATH10K_STATE_RESTARTED) { 518 ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
479 goto exit; 520 goto exit;
480 } 521 }
481 522
482 ath10k_info("simulating firmware crash\n"); 523 /* drop the possible '\n' from the end */
524 if (buf[count - 1] == '\n') {
525 buf[count - 1] = 0;
526 count--;
527 }
483 528
484 ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); 529 if (!strcmp(buf, "soft")) {
485 if (ret) 530 ath10k_info("simulating soft firmware crash\n");
486 ath10k_warn("failed to force fw hang (%d)\n", ret); 531 ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
532 } else if (!strcmp(buf, "hard")) {
533 ath10k_info("simulating hard firmware crash\n");
534 ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
535 ar->wmi.vdev_param->rts_threshold, 0);
536 } else {
537 ret = -EINVAL;
538 goto exit;
539 }
540
541 if (ret) {
542 ath10k_warn("failed to simulate firmware crash: %d\n", ret);
543 goto exit;
544 }
487 545
488 if (ret == 0) 546 ret = count;
489 ret = count;
490 547
491exit: 548exit:
492 mutex_unlock(&ar->conf_mutex); 549 mutex_unlock(&ar->conf_mutex);
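
The rewritten simulate_fw_crash handler shows the standard hygiene for keyword writes: copy at most sizeof(buf) - 1 bytes, force NUL termination, strip the trailing newline that echo(1) appends, then dispatch with strcmp(). The same shape in plain C (a sketch; the driver additionally checks device state under conf_mutex):

#include <stdio.h>
#include <string.h>

static int handle_write(const char *user_buf, size_t count)
{
        char buf[32];
        size_t n = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;

        memcpy(buf, user_buf, n);
        buf[n] = '\0';                     /* make sure buf is terminated */

        if (n > 0 && buf[n - 1] == '\n')   /* drop the possible '\n' */
                buf[n - 1] = '\0';

        if (!strcmp(buf, "soft"))
                return puts("soft crash"), 0;
        if (!strcmp(buf, "hard"))
                return puts("hard crash"), 0;
        return -1;                         /* -EINVAL in the driver */
}

int main(void)
{
        return handle_write("soft\n", 5);
}
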
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 7f1bccd3597f..e493db4b4a41 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
157 goto err_pull; 157 goto err_pull;
158 } 158 }
159 ep->tx_credits -= credits; 159 ep->tx_credits -= credits;
160 ath10k_dbg(ATH10K_DBG_HTC,
161 "htc ep %d consumed %d credits (total %d)\n",
162 eid, credits, ep->tx_credits);
160 spin_unlock_bh(&htc->tx_lock); 163 spin_unlock_bh(&htc->tx_lock);
161 } 164 }
162 165
@@ -185,6 +188,9 @@ err_credits:
185 if (ep->tx_credit_flow_enabled) { 188 if (ep->tx_credit_flow_enabled) {
186 spin_lock_bh(&htc->tx_lock); 189 spin_lock_bh(&htc->tx_lock);
187 ep->tx_credits += credits; 190 ep->tx_credits += credits;
191 ath10k_dbg(ATH10K_DBG_HTC,
192 "htc ep %d reverted %d credits back (total %d)\n",
193 eid, credits, ep->tx_credits);
188 spin_unlock_bh(&htc->tx_lock); 194 spin_unlock_bh(&htc->tx_lock);
189 195
190 if (ep->ep_ops.ep_tx_credits) 196 if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
234 if (report->eid >= ATH10K_HTC_EP_COUNT) 240 if (report->eid >= ATH10K_HTC_EP_COUNT)
235 break; 241 break;
236 242
237 ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
238 report->eid, report->credits);
239
240 ep = &htc->endpoint[report->eid]; 243 ep = &htc->endpoint[report->eid];
241 ep->tx_credits += report->credits; 244 ep->tx_credits += report->credits;
242 245
246 ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
247 report->eid, report->credits, ep->tx_credits);
248
243 if (ep->ep_ops.ep_tx_credits) { 249 if (ep->ep_ops.ep_tx_credits) {
244 spin_unlock_bh(&htc->tx_lock); 250 spin_unlock_bh(&htc->tx_lock);
245 ep->ep_ops.ep_tx_credits(htc->ar); 251 ep->ep_ops.ep_tx_credits(htc->ar);
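
The extra ath10k_dbg() lines above make the credit ledger auditable at each of its three touch points: debit on send, credit back when a send fails after the debit, and replenish from target credit reports. A toy version of that ledger, with the locking elided (the driver holds htc->tx_lock around each update):

#include <stdio.h>

struct endpoint { int tx_credits; };

static int ep_send(struct endpoint *ep, int credits)
{
        if (ep->tx_credits < credits)
                return -1;                       /* would starve: back off */
        ep->tx_credits -= credits;
        printf("consumed %d credits (total %d)\n", credits, ep->tx_credits);
        return 0;
}

static void ep_send_failed(struct endpoint *ep, int credits)
{
        ep->tx_credits += credits;               /* revert the debit */
        printf("reverted %d credits back (total %d)\n",
               credits, ep->tx_credits);
}

static void ep_credit_report(struct endpoint *ep, int credits)
{
        ep->tx_credits += credits;
        printf("got %d credits (total %d)\n", credits, ep->tx_credits);
}

int main(void)
{
        struct endpoint ep = { 4 };

        if (ep_send(&ep, 2) == 0)
                ep_send_failed(&ep, 2);          /* pretend HIF tx failed */
        ep_credit_report(&ep, 3);
        return 0;
}
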
@@ -824,17 +830,11 @@ int ath10k_htc_start(struct ath10k_htc *htc)
824 return 0; 830 return 0;
825} 831}
826 832
827/*
828 * stop HTC communications, i.e. stop interrupt reception, and flush all
829 * queued buffers
830 */
831void ath10k_htc_stop(struct ath10k_htc *htc) 833void ath10k_htc_stop(struct ath10k_htc *htc)
832{ 834{
833 spin_lock_bh(&htc->tx_lock); 835 spin_lock_bh(&htc->tx_lock);
834 htc->stopped = true; 836 htc->stopped = true;
835 spin_unlock_bh(&htc->tx_lock); 837 spin_unlock_bh(&htc->tx_lock);
836
837 ath10k_hif_stop(htc->ar);
838} 838}
839 839
840/* registered target arrival callback from the HIF layer */ 840/* registered target arrival callback from the HIF layer */
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 69697af59ce0..19c12cc8d663 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -22,7 +22,7 @@
22#include "core.h" 22#include "core.h"
23#include "debug.h" 23#include "debug.h"
24 24
25static int ath10k_htt_htc_attach(struct ath10k_htt *htt) 25int ath10k_htt_connect(struct ath10k_htt *htt)
26{ 26{
27 struct ath10k_htc_svc_conn_req conn_req; 27 struct ath10k_htc_svc_conn_req conn_req;
28 struct ath10k_htc_svc_conn_resp conn_resp; 28 struct ath10k_htc_svc_conn_resp conn_resp;
@@ -48,39 +48,14 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
48 return 0; 48 return 0;
49} 49}
50 50
51int ath10k_htt_attach(struct ath10k *ar) 51int ath10k_htt_init(struct ath10k *ar)
52{ 52{
53 struct ath10k_htt *htt = &ar->htt; 53 struct ath10k_htt *htt = &ar->htt;
54 int ret;
55 54
56 htt->ar = ar; 55 htt->ar = ar;
57 htt->max_throughput_mbps = 800; 56 htt->max_throughput_mbps = 800;
58 57
59 /* 58 /*
60 * Connect to HTC service.
61 * This has to be done before calling ath10k_htt_rx_attach,
62 * since ath10k_htt_rx_attach involves sending a rx ring configure
63 * message to the target.
64 */
65 ret = ath10k_htt_htc_attach(htt);
66 if (ret) {
67 ath10k_err("could not attach htt htc (%d)\n", ret);
68 goto err_htc_attach;
69 }
70
71 ret = ath10k_htt_tx_attach(htt);
72 if (ret) {
73 ath10k_err("could not attach htt tx (%d)\n", ret);
74 goto err_htc_attach;
75 }
76
77 ret = ath10k_htt_rx_attach(htt);
78 if (ret) {
79 ath10k_err("could not attach htt rx (%d)\n", ret);
80 goto err_rx_attach;
81 }
82
83 /*
84 * Prefetch enough data to satisfy target 59 * Prefetch enough data to satisfy target
85 * classification engine. 60 * classification engine.
86 * This is for LL chips. HL chips will probably 61 * This is for LL chips. HL chips will probably
@@ -93,11 +68,6 @@ int ath10k_htt_attach(struct ath10k *ar)
93 2; /* ip4 dscp or ip6 priority */ 68 2; /* ip4 dscp or ip6 priority */
94 69
95 return 0; 70 return 0;
96
97err_rx_attach:
98 ath10k_htt_tx_detach(htt);
99err_htc_attach:
100 return ret;
101} 71}
102 72
103#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ) 73#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -117,7 +87,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
117 return 0; 87 return 0;
118} 88}
119 89
120int ath10k_htt_attach_target(struct ath10k_htt *htt) 90int ath10k_htt_setup(struct ath10k_htt *htt)
121{ 91{
122 int status; 92 int status;
123 93
@@ -140,9 +110,3 @@ int ath10k_htt_attach_target(struct ath10k_htt *htt)
140 110
141 return ath10k_htt_send_rx_ring_cfg_ll(htt); 111 return ath10k_htt_send_rx_ring_cfg_ll(htt);
142} 112}
143
144void ath10k_htt_detach(struct ath10k_htt *htt)
145{
146 ath10k_htt_rx_detach(htt);
147 ath10k_htt_tx_detach(htt);
148}
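
The attach/detach pair is split above into ath10k_htt_init() (software state only), ath10k_htt_tx_alloc()/ath10k_htt_rx_alloc(), ath10k_htt_connect() and ath10k_htt_setup(), so that allocations can precede ath10k_hif_start() in core_start and the HTC connect happens only once the target is ready. A stub sequence showing the intended ordering (illustrative only, matching the core.c hunks earlier in this diff):

#include <stdio.h>

static int htt_init(void)     { puts("init: sw state only"); return 0; }
static int htt_tx_alloc(void) { puts("alloc tx"); return 0; }
static int htt_rx_alloc(void) { puts("alloc rx"); return 0; }
static int hif_start(void)    { puts("start hif"); return 0; }
static int htt_connect(void)  { puts("connect htc service"); return 0; }
static int htt_setup(void)    { puts("setup: cfg rx ring"); return 0; }

int main(void)
{
        /* allocations precede hif_start(); connect/setup follow it */
        if (htt_init() || htt_tx_alloc() || htt_rx_alloc())
                return 1;
        if (hif_start() || htt_connect() || htt_setup())
                return 1;
        return 0;
}
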
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 654867fc1ae7..9a263462c793 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/dmapool.h> 23#include <linux/dmapool.h>
24#include <net/mac80211.h>
24 25
25#include "htc.h" 26#include "htc.h"
26#include "rx_desc.h" 27#include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
1172 u16 peer_id; 1173 u16 peer_id;
1173}; 1174};
1174 1175
1175struct htt_rx_info {
1176 struct sk_buff *skb;
1177 enum htt_rx_mpdu_status status;
1178 enum htt_rx_mpdu_encrypt_type encrypt_type;
1179 s8 signal;
1180 struct {
1181 u8 info0;
1182 u32 info1;
1183 u32 info2;
1184 } rate;
1185
1186 u32 tsf;
1187 bool fcs_err;
1188 bool amsdu_more;
1189 bool mic_err;
1190};
1191
1192struct ath10k_htt_txbuf { 1176struct ath10k_htt_txbuf {
1193 struct htt_data_tx_desc_frag frags[2]; 1177 struct htt_data_tx_desc_frag frags[2];
1194 struct ath10k_htc_hdr htc_hdr; 1178 struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
1289 struct tasklet_struct txrx_compl_task; 1273 struct tasklet_struct txrx_compl_task;
1290 struct sk_buff_head tx_compl_q; 1274 struct sk_buff_head tx_compl_q;
1291 struct sk_buff_head rx_compl_q; 1275 struct sk_buff_head rx_compl_q;
1276
1277 /* rx_status template */
1278 struct ieee80211_rx_status rx_status;
1292}; 1279};
1293 1280
1294#define RX_HTT_HDR_STATUS_LEN 64 1281#define RX_HTT_HDR_STATUS_LEN 64
@@ -1341,14 +1328,16 @@ struct htt_rx_desc {
1341#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ 1328#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
1342#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) 1329#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
1343 1330
1344int ath10k_htt_attach(struct ath10k *ar); 1331int ath10k_htt_connect(struct ath10k_htt *htt);
1345int ath10k_htt_attach_target(struct ath10k_htt *htt); 1332int ath10k_htt_init(struct ath10k *ar);
1346void ath10k_htt_detach(struct ath10k_htt *htt); 1333int ath10k_htt_setup(struct ath10k_htt *htt);
1334
1335int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
1336void ath10k_htt_tx_free(struct ath10k_htt *htt);
1337
1338int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
1339void ath10k_htt_rx_free(struct ath10k_htt *htt);
1347 1340
1348int ath10k_htt_tx_attach(struct ath10k_htt *htt);
1349void ath10k_htt_tx_detach(struct ath10k_htt *htt);
1350int ath10k_htt_rx_attach(struct ath10k_htt *htt);
1351void ath10k_htt_rx_detach(struct ath10k_htt *htt);
1352void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); 1341void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1353void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); 1342void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
1354int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); 1343int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index cdcbe2de95f9..6c102b1312ff 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -225,10 +225,26 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
225 ath10k_htt_rx_msdu_buff_replenish(htt); 225 ath10k_htt_rx_msdu_buff_replenish(htt);
226} 226}
227 227
228void ath10k_htt_rx_detach(struct ath10k_htt *htt) 228static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
229{ 229{
230 int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; 230 struct sk_buff *skb;
231 int i;
232
233 for (i = 0; i < htt->rx_ring.size; i++) {
234 skb = htt->rx_ring.netbufs_ring[i];
235 if (!skb)
236 continue;
231 237
238 dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
239 skb->len + skb_tailroom(skb),
240 DMA_FROM_DEVICE);
241 dev_kfree_skb_any(skb);
242 htt->rx_ring.netbufs_ring[i] = NULL;
243 }
244}
245
246void ath10k_htt_rx_free(struct ath10k_htt *htt)
247{
232 del_timer_sync(&htt->rx_ring.refill_retry_timer); 248 del_timer_sync(&htt->rx_ring.refill_retry_timer);
233 tasklet_kill(&htt->rx_replenish_task); 249 tasklet_kill(&htt->rx_replenish_task);
234 tasklet_kill(&htt->txrx_compl_task); 250 tasklet_kill(&htt->txrx_compl_task);
@@ -236,18 +252,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
236 skb_queue_purge(&htt->tx_compl_q); 252 skb_queue_purge(&htt->tx_compl_q);
237 skb_queue_purge(&htt->rx_compl_q); 253 skb_queue_purge(&htt->rx_compl_q);
238 254
239 while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { 255 ath10k_htt_rx_ring_clean_up(htt);
240 struct sk_buff *skb =
241 htt->rx_ring.netbufs_ring[sw_rd_idx];
242 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
243
244 dma_unmap_single(htt->ar->dev, cb->paddr,
245 skb->len + skb_tailroom(skb),
246 DMA_FROM_DEVICE);
247 dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
248 sw_rd_idx++;
249 sw_rd_idx &= htt->rx_ring.size_mask;
250 }
251 256
252 dma_free_coherent(htt->ar->dev, 257 dma_free_coherent(htt->ar->dev,
253 (htt->rx_ring.size * 258 (htt->rx_ring.size *
@@ -277,6 +282,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
277 282
278 idx = htt->rx_ring.sw_rd_idx.msdu_payld; 283 idx = htt->rx_ring.sw_rd_idx.msdu_payld;
279 msdu = htt->rx_ring.netbufs_ring[idx]; 284 msdu = htt->rx_ring.netbufs_ring[idx];
285 htt->rx_ring.netbufs_ring[idx] = NULL;
280 286
281 idx++; 287 idx++;
282 idx &= htt->rx_ring.size_mask; 288 idx &= htt->rx_ring.size_mask;
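
Freeing the rx ring above no longer walks from sw_rd_idx to the alloc index; instead the slot array is kzalloc'd, slots are NULLed as buffers are popped, and cleanup sweeps every slot unconditionally, which stays correct even when the indices are stale after a firmware crash. The sweep pattern in miniature (a sketch, not driver code):

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

/* Free whatever is still parked in the ring, slot by slot, and clear
 * each slot so a double sweep is harmless. */
static void ring_clean_up(void *ring[RING_SIZE])
{
        int i;

        for (i = 0; i < RING_SIZE; i++) {
                if (!ring[i])
                        continue;
                free(ring[i]);
                ring[i] = NULL;
        }
}

int main(void)
{
        void *ring[RING_SIZE] = { NULL };

        ring[2] = malloc(16);
        ring[5] = malloc(16);
        ring_clean_up(ring);
        printf("slot 2 now %p\n", ring[2]);
        return 0;
}
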
@@ -297,6 +303,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
297 } 303 }
298} 304}
299 305
306/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
300static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 307static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
301 u8 **fw_desc, int *fw_desc_len, 308 u8 **fw_desc, int *fw_desc_len,
302 struct sk_buff **head_msdu, 309 struct sk_buff **head_msdu,
@@ -305,12 +312,13 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
305 int msdu_len, msdu_chaining = 0; 312 int msdu_len, msdu_chaining = 0;
306 struct sk_buff *msdu; 313 struct sk_buff *msdu;
307 struct htt_rx_desc *rx_desc; 314 struct htt_rx_desc *rx_desc;
315 bool corrupted = false;
308 316
309 lockdep_assert_held(&htt->rx_ring.lock); 317 lockdep_assert_held(&htt->rx_ring.lock);
310 318
311 if (htt->rx_confused) { 319 if (htt->rx_confused) {
312 ath10k_warn("htt is confused. refusing rx\n"); 320 ath10k_warn("htt is confused. refusing rx\n");
313 return 0; 321 return -1;
314 } 322 }
315 323
316 msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); 324 msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -398,7 +406,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
398 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), 406 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
399 RX_MSDU_START_INFO0_MSDU_LENGTH); 407 RX_MSDU_START_INFO0_MSDU_LENGTH);
400 msdu_chained = rx_desc->frag_info.ring2_more_count; 408 msdu_chained = rx_desc->frag_info.ring2_more_count;
401 msdu_chaining = msdu_chained;
402 409
403 if (msdu_len_invalid) 410 if (msdu_len_invalid)
404 msdu_len = 0; 411 msdu_len = 0;
@@ -426,11 +433,15 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
426 433
427 msdu->next = next; 434 msdu->next = next;
428 msdu = next; 435 msdu = next;
436 msdu_chaining = 1;
429 } 437 }
430 438
431 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & 439 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
432 RX_MSDU_END_INFO0_LAST_MSDU; 440 RX_MSDU_END_INFO0_LAST_MSDU;
433 441
442 if (msdu_chaining && !last_msdu)
443 corrupted = true;
444
434 if (last_msdu) { 445 if (last_msdu) {
435 msdu->next = NULL; 446 msdu->next = NULL;
436 break; 447 break;
@@ -442,6 +453,23 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
442 } 453 }
443 *tail_msdu = msdu; 454 *tail_msdu = msdu;
444 455
456 if (*head_msdu == NULL)
457 msdu_chaining = -1;
458
459 /*
460 * Apparently FW sometimes reports weird chained MSDU sequences with
461 * more than one rx descriptor. This seems like a bug but needs more
462 * analyzing. For the time being fix it by dropping such sequences to
463 * avoid blowing up the host system.
464 */
465 if (corrupted) {
466 ath10k_warn("failed to pop chained msdus, dropping\n");
467 ath10k_htt_rx_free_msdu_chain(*head_msdu);
468 *head_msdu = NULL;
469 *tail_msdu = NULL;
470 msdu_chaining = -EINVAL;
471 }
472
445 /* 473 /*
446 * Don't refill the ring yet. 474 * Don't refill the ring yet.
447 * 475 *
@@ -464,7 +492,7 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
464 ath10k_htt_rx_msdu_buff_replenish(htt); 492 ath10k_htt_rx_msdu_buff_replenish(htt);
465} 493}
466 494
467int ath10k_htt_rx_attach(struct ath10k_htt *htt) 495int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
468{ 496{
469 dma_addr_t paddr; 497 dma_addr_t paddr;
470 void *vaddr; 498 void *vaddr;
@@ -490,7 +518,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
490 htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); 518 htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
491 519
492 htt->rx_ring.netbufs_ring = 520 htt->rx_ring.netbufs_ring =
493 kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *), 521 kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
494 GFP_KERNEL); 522 GFP_KERNEL);
495 if (!htt->rx_ring.netbufs_ring) 523 if (!htt->rx_ring.netbufs_ring)
496 goto err_netbuf; 524 goto err_netbuf;
@@ -636,6 +664,203 @@ struct amsdu_subframe_hdr {
636 __be16 len; 664 __be16 len;
637} __packed; 665} __packed;
638 666
667static const u8 rx_legacy_rate_idx[] = {
668 3, /* 0x00 - 11Mbps */
669 2, /* 0x01 - 5.5Mbps */
670 1, /* 0x02 - 2Mbps */
671 0, /* 0x03 - 1Mbps */
672 3, /* 0x04 - 11Mbps */
673 2, /* 0x05 - 5.5Mbps */
674 1, /* 0x06 - 2Mbps */
675 0, /* 0x07 - 1Mbps */
676 10, /* 0x08 - 48Mbps */
677 8, /* 0x09 - 24Mbps */
678 6, /* 0x0A - 12Mbps */
679 4, /* 0x0B - 6Mbps */
680 11, /* 0x0C - 54Mbps */
681 9, /* 0x0D - 36Mbps */
682 7, /* 0x0E - 18Mbps */
683 5, /* 0x0F - 9Mbps */
684};
685
686static void ath10k_htt_rx_h_rates(struct ath10k *ar,
687 enum ieee80211_band band,
688 u8 info0, u32 info1, u32 info2,
689 struct ieee80211_rx_status *status)
690{
691 u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
692 u8 preamble = 0;
693
694 /* Check if valid fields */
695 if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
696 return;
697
698 preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
699
700 switch (preamble) {
701 case HTT_RX_LEGACY:
702 cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
703 rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
704 rate_idx = 0;
705
706 if (rate < 0x08 || rate > 0x0F)
707 break;
708
709 switch (band) {
710 case IEEE80211_BAND_2GHZ:
711 if (cck)
712 rate &= ~BIT(3);
713 rate_idx = rx_legacy_rate_idx[rate];
714 break;
715 case IEEE80211_BAND_5GHZ:
716 rate_idx = rx_legacy_rate_idx[rate];
717 /* We are using same rate table registering
718 HW - ath10k_rates[]. In case of 5GHz skip
719 CCK rates, so -4 here */
720 rate_idx -= 4;
721 break;
722 default:
723 break;
724 }
725
726 status->rate_idx = rate_idx;
727 break;
728 case HTT_RX_HT:
729 case HTT_RX_HT_WITH_TXBF:
730 /* HT-SIG - Table 20-11 in info1 and info2 */
731 mcs = info1 & 0x1F;
732 nss = mcs >> 3;
733 bw = (info1 >> 7) & 1;
734 sgi = (info2 >> 7) & 1;
735
736 status->rate_idx = mcs;
737 status->flag |= RX_FLAG_HT;
738 if (sgi)
739 status->flag |= RX_FLAG_SHORT_GI;
740 if (bw)
741 status->flag |= RX_FLAG_40MHZ;
742 break;
743 case HTT_RX_VHT:
744 case HTT_RX_VHT_WITH_TXBF:
745 /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
746 TODO check this */
747 mcs = (info2 >> 4) & 0x0F;
748 nss = ((info1 >> 10) & 0x07) + 1;
749 bw = info1 & 3;
750 sgi = info2 & 1;
751
752 status->rate_idx = mcs;
753 status->vht_nss = nss;
754
755 if (sgi)
756 status->flag |= RX_FLAG_SHORT_GI;
757
758 switch (bw) {
759 /* 20MHZ */
760 case 0:
761 break;
762 /* 40MHZ */
763 case 1:
764 status->flag |= RX_FLAG_40MHZ;
765 break;
766 /* 80MHZ */
767 case 2:
768 status->vht_flag |= RX_VHT_FLAG_80MHZ;
769 }
770
771 status->flag |= RX_FLAG_VHT;
772 break;
773 default:
774 break;
775 }
776}
777
778static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
779 struct ieee80211_rx_status *rx_status,
780 struct sk_buff *skb,
781 enum htt_rx_mpdu_encrypt_type enctype,
782 enum rx_msdu_decap_format fmt,
783 bool dot11frag)
784{
785 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
786
787 rx_status->flag &= ~(RX_FLAG_DECRYPTED |
788 RX_FLAG_IV_STRIPPED |
789 RX_FLAG_MMIC_STRIPPED);
790
791 if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
792 return;
793
794 /*
795 * There's no explicit rx descriptor flag to indicate whether a given
796 * frame has been decrypted or not. We're forced to use the decap
797 * format as an implicit indication. However fragmentation rx is always
798 * raw and it probably never reports undecrypted raws.
799 *
800 * This makes sure sniffed frames are reported as-is without stripping
801 * the protected flag.
802 */
803 if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
804 return;
805
806 rx_status->flag |= RX_FLAG_DECRYPTED |
807 RX_FLAG_IV_STRIPPED |
808 RX_FLAG_MMIC_STRIPPED;
809 hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
810 ~IEEE80211_FCTL_PROTECTED);
811}
812
813static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
814 struct ieee80211_rx_status *status)
815{
816 struct ieee80211_channel *ch;
817
818 spin_lock_bh(&ar->data_lock);
819 ch = ar->scan_channel;
820 if (!ch)
821 ch = ar->rx_channel;
822 spin_unlock_bh(&ar->data_lock);
823
824 if (!ch)
825 return false;
826
827 status->band = ch->band;
828 status->freq = ch->center_freq;
829
830 return true;
831}
832
833static void ath10k_process_rx(struct ath10k *ar,
834 struct ieee80211_rx_status *rx_status,
835 struct sk_buff *skb)
836{
837 struct ieee80211_rx_status *status;
838
839 status = IEEE80211_SKB_RXCB(skb);
840 *status = *rx_status;
841
842 ath10k_dbg(ATH10K_DBG_DATA,
843 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
844 skb,
845 skb->len,
846 status->flag == 0 ? "legacy" : "",
847 status->flag & RX_FLAG_HT ? "ht" : "",
848 status->flag & RX_FLAG_VHT ? "vht" : "",
849 status->flag & RX_FLAG_40MHZ ? "40" : "",
850 status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
851 status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
852 status->rate_idx,
853 status->vht_nss,
854 status->freq,
855 status->band, status->flag,
856 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
857 !!(status->flag & RX_FLAG_MMIC_ERROR));
858 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
859 skb->data, skb->len);
860
861 ieee80211_rx(ar->hw, skb);
862}
863
639static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) 864static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
640{ 865{
641 /* nwifi header is padded to 4 bytes. this fixes 4addr rx */ 866 /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
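
The rx_legacy_rate_idx[] table above maps the 4-bit hardware rate code onto the driver's registered rate table: on 2 GHz a CCK frame first masks off bit 3, and on 5 GHz four is subtracted because that band's table carries no CCK entries. A small decoder in the same shape; it assumes the same table ordering as ath10k_rates[], which is not shown in this diff:

#include <stdint.h>
#include <stdio.h>

static const uint8_t legacy_rate_idx[16] = {
        3, 2, 1, 0, 3, 2, 1, 0,        /* CCK: 11, 5.5, 2, 1 Mbps */
        10, 8, 6, 4, 11, 9, 7, 5,      /* OFDM: 48,24,12,6,54,36,18,9 */
};

static int decode_legacy(uint8_t rate, int is_2ghz, int is_cck)
{
        int idx;

        if (is_2ghz && is_cck)
                rate &= ~(1u << 3);    /* fold CCK codes into 0x00-0x07 */

        idx = legacy_rate_idx[rate & 0x0f];

        if (!is_2ghz)
                idx -= 4;              /* 5 GHz rate table has no CCK */

        return idx;
}

int main(void)
{
        printf("ofdm 54M on 5GHz -> idx %d\n", decode_legacy(0x0c, 0, 0));
        return 0;
}
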
@@ -643,11 +868,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
643} 868}
644 869
645static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, 870static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
646 struct htt_rx_info *info) 871 struct ieee80211_rx_status *rx_status,
872 struct sk_buff *skb_in)
647{ 873{
648 struct htt_rx_desc *rxd; 874 struct htt_rx_desc *rxd;
875 struct sk_buff *skb = skb_in;
649 struct sk_buff *first; 876 struct sk_buff *first;
650 struct sk_buff *skb = info->skb;
651 enum rx_msdu_decap_format fmt; 877 enum rx_msdu_decap_format fmt;
652 enum htt_rx_mpdu_encrypt_type enctype; 878 enum htt_rx_mpdu_encrypt_type enctype;
653 struct ieee80211_hdr *hdr; 879 struct ieee80211_hdr *hdr;
@@ -728,24 +954,28 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
728 break; 954 break;
729 } 955 }
730 956
731 info->skb = skb; 957 skb_in = skb;
732 info->encrypt_type = enctype; 958 ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
959 false);
733 skb = skb->next; 960 skb = skb->next;
734 info->skb->next = NULL; 961 skb_in->next = NULL;
735 962
736 if (skb) 963 if (skb)
737 info->amsdu_more = true; 964 rx_status->flag |= RX_FLAG_AMSDU_MORE;
965 else
966 rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
738 967
739 ath10k_process_rx(htt->ar, info); 968 ath10k_process_rx(htt->ar, rx_status, skb_in);
740 } 969 }
741 970
742 /* FIXME: It might be nice to re-assemble the A-MSDU when there's a 971 /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
743 * monitor interface active for sniffing purposes. */ 972 * monitor interface active for sniffing purposes. */
744} 973}
745 974
746static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) 975static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
976 struct ieee80211_rx_status *rx_status,
977 struct sk_buff *skb)
747{ 978{
748 struct sk_buff *skb = info->skb;
749 struct htt_rx_desc *rxd; 979 struct htt_rx_desc *rxd;
750 struct ieee80211_hdr *hdr; 980 struct ieee80211_hdr *hdr;
751 enum rx_msdu_decap_format fmt; 981 enum rx_msdu_decap_format fmt;
@@ -808,66 +1038,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
808 break; 1038 break;
809 } 1039 }
810 1040
811 info->skb = skb; 1041 ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
812 info->encrypt_type = enctype;
813 1042
814 ath10k_process_rx(htt->ar, info); 1043 ath10k_process_rx(htt->ar, rx_status, skb);
815}
816
817static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
818{
819 struct htt_rx_desc *rxd;
820 u32 flags;
821
822 rxd = (void *)skb->data - sizeof(*rxd);
823 flags = __le32_to_cpu(rxd->attention.flags);
824
825 if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
826 return true;
827
828 return false;
829}
830
831static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
832{
833 struct htt_rx_desc *rxd;
834 u32 flags;
835
836 rxd = (void *)skb->data - sizeof(*rxd);
837 flags = __le32_to_cpu(rxd->attention.flags);
838
839 if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
840 return true;
841
842 return false;
843}
844
845static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
846{
847 struct htt_rx_desc *rxd;
848 u32 flags;
849
850 rxd = (void *)skb->data - sizeof(*rxd);
851 flags = __le32_to_cpu(rxd->attention.flags);
852
853 if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
854 return true;
855
856 return false;
857}
858
859static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
860{
861 struct htt_rx_desc *rxd;
862 u32 flags;
863
864 rxd = (void *)skb->data - sizeof(*rxd);
865 flags = __le32_to_cpu(rxd->attention.flags);
866
867 if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
868 return true;
869
870 return false;
871} 1044}
872 1045
873static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) 1046static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1125,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
952 return 0; 1125 return 0;
953} 1126}
954 1127
1128static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
1129 struct sk_buff *head,
1130 enum htt_rx_mpdu_status status,
1131 bool channel_set,
1132 u32 attention)
1133{
1134 if (head->len == 0) {
1135 ath10k_dbg(ATH10K_DBG_HTT,
1136 "htt rx dropping due to zero-len\n");
1137 return false;
1138 }
1139
1140 if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
1141 ath10k_dbg(ATH10K_DBG_HTT,
1142 "htt rx dropping due to decrypt-err\n");
1143 return false;
1144 }
1145
1146 if (!channel_set) {
1147 ath10k_warn("no channel configured; ignoring frame!\n");
1148 return false;
1149 }
1150
1151 /* Skip mgmt frames while we handle this in WMI */
1152 if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
1153 attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
1154 ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
1155 return false;
1156 }
1157
1158 if (status != HTT_RX_IND_MPDU_STATUS_OK &&
1159 status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
1160 status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
1161 !htt->ar->monitor_started) {
1162 ath10k_dbg(ATH10K_DBG_HTT,
1163 "htt rx ignoring frame w/ status %d\n",
1164 status);
1165 return false;
1166 }
1167
1168 if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
1169 ath10k_dbg(ATH10K_DBG_HTT,
1170 "htt rx CAC running\n");
1171 return false;
1172 }
1173
1174 return true;
1175}
1176
955static void ath10k_htt_rx_handler(struct ath10k_htt *htt, 1177static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
956 struct htt_rx_indication *rx) 1178 struct htt_rx_indication *rx)
957{ 1179{
958 struct htt_rx_info info; 1180 struct ieee80211_rx_status *rx_status = &htt->rx_status;
959 struct htt_rx_indication_mpdu_range *mpdu_ranges; 1181 struct htt_rx_indication_mpdu_range *mpdu_ranges;
1182 struct htt_rx_desc *rxd;
1183 enum htt_rx_mpdu_status status;
960 struct ieee80211_hdr *hdr; 1184 struct ieee80211_hdr *hdr;
961 int num_mpdu_ranges; 1185 int num_mpdu_ranges;
1186 u32 attention;
962 int fw_desc_len; 1187 int fw_desc_len;
963 u8 *fw_desc; 1188 u8 *fw_desc;
1189 bool channel_set;
964 int i, j; 1190 int i, j;
1191 int ret;
965 1192
966 lockdep_assert_held(&htt->rx_ring.lock); 1193 lockdep_assert_held(&htt->rx_ring.lock);
967 1194
968 memset(&info, 0, sizeof(info));
969
970 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); 1195 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
971 fw_desc = (u8 *)&rx->fw_desc; 1196 fw_desc = (u8 *)&rx->fw_desc;
972 1197
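
The new ath10k_htt_rx_amsdu_allowed() above folds what used to be five separate drop branches in the handler loop into a single early-return predicate; on a false return the caller just frees the MSDU chain and moves on. A compact sketch of the same filter shape (the frame fields and drop reasons are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct frame { int len; bool decrypt_err; bool mgmt; };

static bool frame_allowed(const struct frame *f)
{
        if (f->len == 0)
                return false;           /* zero-length frame */
        if (f->decrypt_err)
                return false;           /* undecryptable */
        if (f->mgmt)
                return false;           /* handled on another path */
        return true;
}

int main(void)
{
        struct frame frames[] = {
                { .len = 0 },
                { .len = 64, .decrypt_err = true },
                { .len = 128 },
        };

        for (unsigned int i = 0; i < 3; i++) {
                if (!frame_allowed(&frames[i]))
                        continue;       /* free the chain in the driver */
                printf("frame %u delivered\n", i);
        }
        return 0;
}
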
@@ -974,106 +1199,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
974 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 1199 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
975 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 1200 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
976 1201
 1202 /* Fill this once, since this is per-ppdu */
1203 if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
1204 memset(rx_status, 0, sizeof(*rx_status));
1205 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
1206 rx->ppdu.combined_rssi;
1207 }
1208
1209 if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
1210 /* TSF available only in 32-bit */
1211 rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
1212 rx_status->flag |= RX_FLAG_MACTIME_END;
1213 }
1214
1215 channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
1216
1217 if (channel_set) {
1218 ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
1219 rx->ppdu.info0,
1220 __le32_to_cpu(rx->ppdu.info1),
1221 __le32_to_cpu(rx->ppdu.info2),
1222 rx_status);
1223 }
1224
977 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 1225 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
978 rx, sizeof(*rx) + 1226 rx, sizeof(*rx) +
979 (sizeof(struct htt_rx_indication_mpdu_range) * 1227 (sizeof(struct htt_rx_indication_mpdu_range) *
980 num_mpdu_ranges)); 1228 num_mpdu_ranges));
981 1229
982 for (i = 0; i < num_mpdu_ranges; i++) { 1230 for (i = 0; i < num_mpdu_ranges; i++) {
983 info.status = mpdu_ranges[i].mpdu_range_status; 1231 status = mpdu_ranges[i].mpdu_range_status;
984 1232
985 for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) { 1233 for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
986 struct sk_buff *msdu_head, *msdu_tail; 1234 struct sk_buff *msdu_head, *msdu_tail;
987 enum htt_rx_mpdu_status status;
988 int msdu_chaining;
989 1235
990 msdu_head = NULL; 1236 msdu_head = NULL;
991 msdu_tail = NULL; 1237 msdu_tail = NULL;
992 msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, 1238 ret = ath10k_htt_rx_amsdu_pop(htt,
993 &fw_desc, 1239 &fw_desc,
994 &fw_desc_len, 1240 &fw_desc_len,
995 &msdu_head, 1241 &msdu_head,
996 &msdu_tail); 1242 &msdu_tail);
997 1243
998 if (!msdu_head) { 1244 if (ret < 0) {
999 ath10k_warn("htt rx no data!\n"); 1245 ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
1000 continue; 1246 ret);
1001 }
1002
1003 if (msdu_head->len == 0) {
1004 ath10k_dbg(ATH10K_DBG_HTT,
1005 "htt rx dropping due to zero-len\n");
1006 ath10k_htt_rx_free_msdu_chain(msdu_head); 1247 ath10k_htt_rx_free_msdu_chain(msdu_head);
1007 continue; 1248 continue;
1008 } 1249 }
1009 1250
1010 if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { 1251 rxd = container_of((void *)msdu_head->data,
1011 ath10k_dbg(ATH10K_DBG_HTT, 1252 struct htt_rx_desc,
1012 "htt rx dropping due to decrypt-err\n"); 1253 msdu_payload);
1013 ath10k_htt_rx_free_msdu_chain(msdu_head); 1254 attention = __le32_to_cpu(rxd->attention.flags);
1014 continue;
1015 }
1016 1255
1017 status = info.status; 1256 if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
1018 1257 status,
1019 /* Skip mgmt frames while we handle this in WMI */ 1258 channel_set,
1020 if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL || 1259 attention)) {
1021 ath10k_htt_rx_is_mgmt(msdu_head)) {
1022 ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
1023 ath10k_htt_rx_free_msdu_chain(msdu_head); 1260 ath10k_htt_rx_free_msdu_chain(msdu_head);
1024 continue; 1261 continue;
1025 } 1262 }
1026 1263
1027 if (status != HTT_RX_IND_MPDU_STATUS_OK && 1264 if (ret > 0 &&
1028 status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && 1265 ath10k_unchain_msdu(msdu_head) < 0) {
1029 status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
1030 !htt->ar->monitor_enabled) {
1031 ath10k_dbg(ATH10K_DBG_HTT,
1032 "htt rx ignoring frame w/ status %d\n",
1033 status);
1034 ath10k_htt_rx_free_msdu_chain(msdu_head); 1266 ath10k_htt_rx_free_msdu_chain(msdu_head);
1035 continue; 1267 continue;
1036 } 1268 }
1037 1269
1038 if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { 1270 if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
1039 ath10k_dbg(ATH10K_DBG_HTT, 1271 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
1040 "htt rx CAC running\n"); 1272 else
1041 ath10k_htt_rx_free_msdu_chain(msdu_head); 1273 rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
1042 continue;
1043 }
1044
1045 if (msdu_chaining &&
1046 (ath10k_unchain_msdu(msdu_head) < 0)) {
1047 ath10k_htt_rx_free_msdu_chain(msdu_head);
1048 continue;
1049 }
1050
1051 info.skb = msdu_head;
1052 info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
1053 info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
1054
1055 if (info.fcs_err)
1056 ath10k_dbg(ATH10K_DBG_HTT,
1057 "htt rx has FCS err\n");
1058
1059 if (info.mic_err)
1060 ath10k_dbg(ATH10K_DBG_HTT,
1061 "htt rx has MIC err\n");
1062
1063 info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
1064 info.signal += rx->ppdu.combined_rssi;
1065 1274
1066 info.rate.info0 = rx->ppdu.info0; 1275 if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
1067 info.rate.info1 = __le32_to_cpu(rx->ppdu.info1); 1276 rx_status->flag |= RX_FLAG_MMIC_ERROR;
1068 info.rate.info2 = __le32_to_cpu(rx->ppdu.info2); 1277 else
1069 info.tsf = __le32_to_cpu(rx->ppdu.tsf); 1278 rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
1070 1279
1071 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); 1280 hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
1072 1281
1073 if (ath10k_htt_rx_hdr_is_amsdu(hdr)) 1282 if (ath10k_htt_rx_hdr_is_amsdu(hdr))
1074 ath10k_htt_rx_amsdu(htt, &info); 1283 ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
1075 else 1284 else
1076 ath10k_htt_rx_msdu(htt, &info); 1285 ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
1077 } 1286 }
1078 } 1287 }
1079 1288
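
Since rx_status now lives in struct ath10k_htt and is reused across all MPDUs of a PPDU (it is only memset when a PPDU start is signalled), the per-frame error flags have to be cleared as well as set, hence the paired |= and &= ~ above. A sketch of why the pairing matters (flag names are placeholders for the mac80211 RX_FLAG_* bits):

#include <stdint.h>
#include <stdio.h>

#define FLAG_FAILED_FCS (1u << 0)
#define FLAG_MMIC_ERROR (1u << 1)

struct rx_status { uint32_t flag; int signal; };

static void handle_mpdu(struct rx_status *st, int fcs_err, int mic_err)
{
        /* set or clear: a bare |= would leak the previous frame's
         * error bits because *st is shared between frames */
        if (fcs_err)
                st->flag |= FLAG_FAILED_FCS;
        else
                st->flag &= ~FLAG_FAILED_FCS;

        if (mic_err)
                st->flag |= FLAG_MMIC_ERROR;
        else
                st->flag &= ~FLAG_MMIC_ERROR;

        printf("flags=0x%x signal=%d\n", st->flag, st->signal);
}

int main(void)
{
        struct rx_status st = { .signal = -65 };  /* filled once per PPDU */

        handle_mpdu(&st, 1, 0);   /* flags=0x1: bad FCS */
        handle_mpdu(&st, 0, 0);   /* flags=0x0: bit cleared again */
        return 0;
}
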
@@ -1084,11 +1293,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1084 struct htt_rx_fragment_indication *frag) 1293 struct htt_rx_fragment_indication *frag)
1085{ 1294{
1086 struct sk_buff *msdu_head, *msdu_tail; 1295 struct sk_buff *msdu_head, *msdu_tail;
1296 enum htt_rx_mpdu_encrypt_type enctype;
1087 struct htt_rx_desc *rxd; 1297 struct htt_rx_desc *rxd;
1088 enum rx_msdu_decap_format fmt; 1298 enum rx_msdu_decap_format fmt;
1089 struct htt_rx_info info = {}; 1299 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1090 struct ieee80211_hdr *hdr; 1300 struct ieee80211_hdr *hdr;
1091 int msdu_chaining; 1301 int ret;
1092 bool tkip_mic_err; 1302 bool tkip_mic_err;
1093 bool decrypt_err; 1303 bool decrypt_err;
1094 u8 *fw_desc; 1304 u8 *fw_desc;
@@ -1102,24 +1312,21 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1102 msdu_tail = NULL; 1312 msdu_tail = NULL;
1103 1313
1104 spin_lock_bh(&htt->rx_ring.lock); 1314 spin_lock_bh(&htt->rx_ring.lock);
1105 msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, 1315 ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
1106 &msdu_head, &msdu_tail); 1316 &msdu_head, &msdu_tail);
1107 spin_unlock_bh(&htt->rx_ring.lock); 1317 spin_unlock_bh(&htt->rx_ring.lock);
1108 1318
1109 ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); 1319 ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
1110 1320
1111 if (!msdu_head) { 1321 if (ret) {
1112 ath10k_warn("htt rx frag no data\n"); 1322 ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
1113 return; 1323 ret);
1114 }
1115
1116 if (msdu_chaining || msdu_head != msdu_tail) {
1117 ath10k_warn("aggregation with fragmentation?!\n");
1118 ath10k_htt_rx_free_msdu_chain(msdu_head); 1324 ath10k_htt_rx_free_msdu_chain(msdu_head);
1119 return; 1325 return;
1120 } 1326 }
1121 1327
1122 /* FIXME: implement signal strength */ 1328 /* FIXME: implement signal strength */
1329 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1123 1330
1124 hdr = (struct ieee80211_hdr *)msdu_head->data; 1331 hdr = (struct ieee80211_hdr *)msdu_head->data;
1125 rxd = (void *)msdu_head->data - sizeof(*rxd); 1332 rxd = (void *)msdu_head->data - sizeof(*rxd);
@@ -1136,57 +1343,55 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1136 goto end; 1343 goto end;
1137 } 1344 }
1138 1345
1139 info.skb = msdu_head; 1346 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1140 info.status = HTT_RX_IND_MPDU_STATUS_OK; 1347 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1141 info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0), 1348 ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
1142 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 1349 true);
1143 info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb); 1350 msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
1144 1351
1145 if (tkip_mic_err) { 1352 if (tkip_mic_err)
1146 ath10k_warn("tkip mic error\n"); 1353 ath10k_warn("tkip mic error\n");
1147 info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
1148 }
1149 1354
1150 if (decrypt_err) { 1355 if (decrypt_err) {
1151 ath10k_warn("decryption err in fragmented rx\n"); 1356 ath10k_warn("decryption err in fragmented rx\n");
1152 dev_kfree_skb_any(info.skb); 1357 dev_kfree_skb_any(msdu_head);
1153 goto end; 1358 goto end;
1154 } 1359 }
1155 1360
1156 if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { 1361 if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
1157 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1362 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1158 paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type); 1363 paramlen = ath10k_htt_rx_crypto_param_len(enctype);
1159 1364
1160 /* It is more efficient to move the header than the payload */ 1365 /* It is more efficient to move the header than the payload */
1161 memmove((void *)info.skb->data + paramlen, 1366 memmove((void *)msdu_head->data + paramlen,
1162 (void *)info.skb->data, 1367 (void *)msdu_head->data,
1163 hdrlen); 1368 hdrlen);
1164 skb_pull(info.skb, paramlen); 1369 skb_pull(msdu_head, paramlen);
1165 hdr = (struct ieee80211_hdr *)info.skb->data; 1370 hdr = (struct ieee80211_hdr *)msdu_head->data;
1166 } 1371 }
1167 1372
1168 /* remove trailing FCS */ 1373 /* remove trailing FCS */
1169 trim = 4; 1374 trim = 4;
1170 1375
1171 /* remove crypto trailer */ 1376 /* remove crypto trailer */
1172 trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type); 1377 trim += ath10k_htt_rx_crypto_tail_len(enctype);
1173 1378
1174 /* last fragment of TKIP frags has MIC */ 1379 /* last fragment of TKIP frags has MIC */
1175 if (!ieee80211_has_morefrags(hdr->frame_control) && 1380 if (!ieee80211_has_morefrags(hdr->frame_control) &&
1176 info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 1381 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1177 trim += 8; 1382 trim += 8;
1178 1383
1179 if (trim > info.skb->len) { 1384 if (trim > msdu_head->len) {
1180 ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n"); 1385 ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
1181 dev_kfree_skb_any(info.skb); 1386 dev_kfree_skb_any(msdu_head);
1182 goto end; 1387 goto end;
1183 } 1388 }
1184 1389
1185 skb_trim(info.skb, info.skb->len - trim); 1390 skb_trim(msdu_head, msdu_head->len - trim);
1186 1391
1187 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", 1392 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
1188 info.skb->data, info.skb->len); 1393 msdu_head->data, msdu_head->len);
1189 ath10k_process_rx(htt->ar, &info); 1394 ath10k_process_rx(htt->ar, rx_status, msdu_head);
1190 1395
1191end: 1396end:
1192 if (fw_desc_len > 0) { 1397 if (fw_desc_len > 0) {
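
The fragment path strips the per-frame crypto parameters by sliding the short 802.11 header forward over them and then pulling the front of the buffer, instead of moving the much larger payload backwards; that is what the "more efficient to move the header" comment is about. A userspace sketch of the same trick, with a simplified buffer layout:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* [header][crypto params][payload] -> [header][payload] */
        char buf[] = "HDRparamPAYLOAD";
        size_t hdrlen = 3, paramlen = 5;
        char *data = buf;

        /* moving hdrlen bytes is cheaper than moving the payload */
        memmove(data + paramlen, data, hdrlen);
        data += paramlen;         /* like skb_pull(skb, paramlen) */

        printf("%s\n", data);     /* prints "HDRPAYLOAD" */
        return 0;
}
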
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7a3e2e40dd5c..7064354d1f4f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -83,7 +83,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
83 __clear_bit(msdu_id, htt->used_msdu_ids); 83 __clear_bit(msdu_id, htt->used_msdu_ids);
84} 84}
85 85
86int ath10k_htt_tx_attach(struct ath10k_htt *htt) 86int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
87{ 87{
88 spin_lock_init(&htt->tx_lock); 88 spin_lock_init(&htt->tx_lock);
89 init_waitqueue_head(&htt->empty_tx_wq); 89 init_waitqueue_head(&htt->empty_tx_wq);
@@ -120,7 +120,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
120 return 0; 120 return 0;
121} 121}
122 122
123static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) 123static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
124{ 124{
125 struct htt_tx_done tx_done = {0}; 125 struct htt_tx_done tx_done = {0};
126 int msdu_id; 126 int msdu_id;
@@ -141,9 +141,9 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
141 spin_unlock_bh(&htt->tx_lock); 141 spin_unlock_bh(&htt->tx_lock);
142} 142}
143 143
144void ath10k_htt_tx_detach(struct ath10k_htt *htt) 144void ath10k_htt_tx_free(struct ath10k_htt *htt)
145{ 145{
146 ath10k_htt_tx_cleanup_pending(htt); 146 ath10k_htt_tx_free_pending(htt);
147 kfree(htt->pending_tx); 147 kfree(htt->pending_tx);
148 kfree(htt->used_msdu_ids); 148 kfree(htt->used_msdu_ids);
149 dma_pool_destroy(htt->tx_pool); 149 dma_pool_destroy(htt->tx_pool);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35fc44e281f5..007e855f4ba9 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -28,6 +28,7 @@
28#define QCA988X_HW_2_0_CHIP_ID_REV 0x2 28#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
29#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" 29#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
30#define QCA988X_HW_2_0_FW_FILE "firmware.bin" 30#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
31#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
31#define QCA988X_HW_2_0_OTP_FILE "otp.bin" 32#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
32#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" 33#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
33#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 34#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
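
The new QCA988X_HW_2_0_FW_2_FILE define pairs with a loader change elsewhere in this series that presumably tries the newer API-2 image first and falls back to the legacy firmware.bin name. A hedged userspace sketch of such an ordered fallback search (the file names only mirror the defines):

#include <stdio.h>

int main(void)
{
        const char *names[] = { "firmware-2.bin", "firmware.bin" };
        FILE *fw = NULL;

        for (unsigned int i = 0; i < 2 && !fw; i++) {
                fw = fopen(names[i], "rb");
                if (fw)
                        printf("loaded %s\n", names[i]);
        }
        if (!fw) {
                fprintf(stderr, "no firmware image found\n");
                return 1;
        }
        fclose(fw);
        return 0;
}
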
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 511a2f81e7af..a21080028c54 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -54,7 +54,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
54 switch (key->cipher) { 54 switch (key->cipher) {
55 case WLAN_CIPHER_SUITE_CCMP: 55 case WLAN_CIPHER_SUITE_CCMP:
56 arg.key_cipher = WMI_CIPHER_AES_CCM; 56 arg.key_cipher = WMI_CIPHER_AES_CCM;
57 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; 57 if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
58 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
59 else
60 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
58 break; 61 break;
59 case WLAN_CIPHER_SUITE_TKIP: 62 case WLAN_CIPHER_SUITE_TKIP:
60 arg.key_cipher = WMI_CIPHER_TKIP; 63 arg.key_cipher = WMI_CIPHER_TKIP;
@@ -165,7 +168,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
165 first_errno = ret; 168 first_errno = ret;
166 169
167 if (ret) 170 if (ret)
168 ath10k_warn("could not remove peer wep key %d (%d)\n", 171 ath10k_warn("failed to remove peer wep key %d: %d\n",
169 i, ret); 172 i, ret);
170 173
171 peer->keys[i] = NULL; 174 peer->keys[i] = NULL;
@@ -213,7 +216,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
213 first_errno = ret; 216 first_errno = ret;
214 217
215 if (ret) 218 if (ret)
216 ath10k_warn("could not remove key for %pM\n", addr); 219 ath10k_warn("failed to remove key for %pM: %d\n",
220 addr, ret);
217 } 221 }
218 222
219 return first_errno; 223 return first_errno;
@@ -323,14 +327,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
323 327
324 ret = ath10k_wmi_peer_create(ar, vdev_id, addr); 328 ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
325 if (ret) { 329 if (ret) {
326 ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n", 330 ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
327 addr, vdev_id, ret); 331 addr, vdev_id, ret);
328 return ret; 332 return ret;
329 } 333 }
330 334
331 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 335 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
332 if (ret) { 336 if (ret) {
333 ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n", 337 ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
334 addr, vdev_id, ret); 338 addr, vdev_id, ret);
335 return ret; 339 return ret;
336 } 340 }
@@ -351,7 +355,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
351 ret = ath10k_wmi_pdev_set_param(ar, param, 355 ret = ath10k_wmi_pdev_set_param(ar, param,
352 ATH10K_KICKOUT_THRESHOLD); 356 ATH10K_KICKOUT_THRESHOLD);
353 if (ret) { 357 if (ret) {
354 ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n", 358 ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
355 arvif->vdev_id, ret); 359 arvif->vdev_id, ret);
356 return ret; 360 return ret;
357 } 361 }
@@ -360,7 +364,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
360 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 364 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
361 ATH10K_KEEPALIVE_MIN_IDLE); 365 ATH10K_KEEPALIVE_MIN_IDLE);
362 if (ret) { 366 if (ret) {
363 ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n", 367 ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
364 arvif->vdev_id, ret); 368 arvif->vdev_id, ret);
365 return ret; 369 return ret;
366 } 370 }
@@ -369,7 +373,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
369 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 373 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
370 ATH10K_KEEPALIVE_MAX_IDLE); 374 ATH10K_KEEPALIVE_MAX_IDLE);
371 if (ret) { 375 if (ret) {
372 ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n", 376 ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
373 arvif->vdev_id, ret); 377 arvif->vdev_id, ret);
374 return ret; 378 return ret;
375 } 379 }
@@ -378,7 +382,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
378 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 382 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
379 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 383 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
380 if (ret) { 384 if (ret) {
381 ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 385 ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
382 arvif->vdev_id, ret); 386 arvif->vdev_id, ret);
383 return ret; 387 return ret;
384 } 388 }
@@ -488,92 +492,20 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
488 return 0; 492 return 0;
489} 493}
490 494
491static int ath10k_vdev_start(struct ath10k_vif *arvif) 495static bool ath10k_monitor_is_enabled(struct ath10k *ar)
492{ 496{
493 struct ath10k *ar = arvif->ar;
494 struct cfg80211_chan_def *chandef = &ar->chandef;
495 struct wmi_vdev_start_request_arg arg = {};
496 int ret = 0;
497
498 lockdep_assert_held(&ar->conf_mutex); 497 lockdep_assert_held(&ar->conf_mutex);
499 498
500 reinit_completion(&ar->vdev_setup_done);
501
502 arg.vdev_id = arvif->vdev_id;
503 arg.dtim_period = arvif->dtim_period;
504 arg.bcn_intval = arvif->beacon_interval;
505
506 arg.channel.freq = chandef->chan->center_freq;
507 arg.channel.band_center_freq1 = chandef->center_freq1;
508 arg.channel.mode = chan_to_phymode(chandef);
509
510 arg.channel.min_power = 0;
511 arg.channel.max_power = chandef->chan->max_power * 2;
512 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
513 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
514
515 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
516 arg.ssid = arvif->u.ap.ssid;
517 arg.ssid_len = arvif->u.ap.ssid_len;
518 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
519
520 /* For now allow DFS for AP mode */
521 arg.channel.chan_radar =
522 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
523 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
524 arg.ssid = arvif->vif->bss_conf.ssid;
525 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
526 }
527
528 ath10k_dbg(ATH10K_DBG_MAC, 499 ath10k_dbg(ATH10K_DBG_MAC,
529 "mac vdev %d start center_freq %d phymode %s\n", 500 "mac monitor refs: promisc %d monitor %d cac %d\n",
530 arg.vdev_id, arg.channel.freq, 501 ar->promisc, ar->monitor,
531 ath10k_wmi_phymode_str(arg.channel.mode)); 502 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
532 503
533 ret = ath10k_wmi_vdev_start(ar, &arg); 504 return ar->promisc || ar->monitor ||
534 if (ret) { 505 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
535 ath10k_warn("WMI vdev %i start failed: ret %d\n",
536 arg.vdev_id, ret);
537 return ret;
538 }
539
540 ret = ath10k_vdev_setup_sync(ar);
541 if (ret) {
542 ath10k_warn("vdev %i setup failed %d\n",
543 arg.vdev_id, ret);
544 return ret;
545 }
546
547 return ret;
548} 506}
549 507
550static int ath10k_vdev_stop(struct ath10k_vif *arvif) 508static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
551{
552 struct ath10k *ar = arvif->ar;
553 int ret;
554
555 lockdep_assert_held(&ar->conf_mutex);
556
557 reinit_completion(&ar->vdev_setup_done);
558
559 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
560 if (ret) {
561 ath10k_warn("WMI vdev %i stop failed: ret %d\n",
562 arvif->vdev_id, ret);
563 return ret;
564 }
565
566 ret = ath10k_vdev_setup_sync(ar);
567 if (ret) {
568 ath10k_warn("vdev %i setup sync failed %d\n",
569 arvif->vdev_id, ret);
570 return ret;
571 }
572
573 return ret;
574}
575
576static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
577{ 509{
578 struct cfg80211_chan_def *chandef = &ar->chandef; 510 struct cfg80211_chan_def *chandef = &ar->chandef;
579 struct ieee80211_channel *channel = chandef->chan; 511 struct ieee80211_channel *channel = chandef->chan;
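
ath10k_monitor_is_enabled(), introduced above, replaces the old monitor_present/monitor_enabled flag pair with state derived on demand from the three things that can want a monitor vdev, so there is no stored flag to fall out of sync. A sketch of the derived-state idea (field names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct dev {
        bool promisc;       /* promiscuous mode requested */
        bool monitor;       /* an actual monitor interface exists */
        bool cac_running;   /* DFS channel availability check */
};

/* no cached "enabled" flag to maintain: just OR the sources */
static bool monitor_is_enabled(const struct dev *d)
{
        return d->promisc || d->monitor || d->cac_running;
}

int main(void)
{
        struct dev d = { .cac_running = true };

        printf("%d\n", monitor_is_enabled(&d));  /* 1 */
        d.cac_running = false;
        printf("%d\n", monitor_is_enabled(&d));  /* 0: safe to stop */
        return 0;
}
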
@@ -582,11 +514,6 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
582 514
583 lockdep_assert_held(&ar->conf_mutex); 515 lockdep_assert_held(&ar->conf_mutex);
584 516
585 if (!ar->monitor_present) {
586 ath10k_warn("mac montor stop -- monitor is not present\n");
587 return -EINVAL;
588 }
589
590 arg.vdev_id = vdev_id; 517 arg.vdev_id = vdev_id;
591 arg.channel.freq = channel->center_freq; 518 arg.channel.freq = channel->center_freq;
592 arg.channel.band_center_freq1 = chandef->center_freq1; 519 arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -604,88 +531,75 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
604 531
605 ret = ath10k_wmi_vdev_start(ar, &arg); 532 ret = ath10k_wmi_vdev_start(ar, &arg);
606 if (ret) { 533 if (ret) {
607 ath10k_warn("Monitor vdev %i start failed: ret %d\n", 534 ath10k_warn("failed to request monitor vdev %i start: %d\n",
608 vdev_id, ret); 535 vdev_id, ret);
609 return ret; 536 return ret;
610 } 537 }
611 538
612 ret = ath10k_vdev_setup_sync(ar); 539 ret = ath10k_vdev_setup_sync(ar);
613 if (ret) { 540 if (ret) {
614 ath10k_warn("Monitor vdev %i setup failed %d\n", 541 ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
615 vdev_id, ret); 542 vdev_id, ret);
616 return ret; 543 return ret;
617 } 544 }
618 545
619 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 546 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
620 if (ret) { 547 if (ret) {
621 ath10k_warn("Monitor vdev %i up failed: %d\n", 548 ath10k_warn("failed to put up monitor vdev %i: %d\n",
622 vdev_id, ret); 549 vdev_id, ret);
623 goto vdev_stop; 550 goto vdev_stop;
624 } 551 }
625 552
626 ar->monitor_vdev_id = vdev_id; 553 ar->monitor_vdev_id = vdev_id;
627 ar->monitor_enabled = true;
628 554
555 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
556 ar->monitor_vdev_id);
629 return 0; 557 return 0;
630 558
631vdev_stop: 559vdev_stop:
632 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 560 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
633 if (ret) 561 if (ret)
634 ath10k_warn("Monitor vdev %i stop failed: %d\n", 562 ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
635 ar->monitor_vdev_id, ret); 563 ar->monitor_vdev_id, ret);
636 564
637 return ret; 565 return ret;
638} 566}
639 567
640static int ath10k_monitor_stop(struct ath10k *ar) 568static int ath10k_monitor_vdev_stop(struct ath10k *ar)
641{ 569{
642 int ret = 0; 570 int ret = 0;
643 571
644 lockdep_assert_held(&ar->conf_mutex); 572 lockdep_assert_held(&ar->conf_mutex);
645 573
646 if (!ar->monitor_present) {
647 ath10k_warn("mac montor stop -- monitor is not present\n");
648 return -EINVAL;
649 }
650
651 if (!ar->monitor_enabled) {
652 ath10k_warn("mac montor stop -- monitor is not enabled\n");
653 return -EINVAL;
654 }
655
656 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); 574 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
657 if (ret) 575 if (ret)
658 ath10k_warn("Monitor vdev %i down failed: %d\n", 576 ath10k_warn("failed to put down monitor vdev %i: %d\n",
659 ar->monitor_vdev_id, ret); 577 ar->monitor_vdev_id, ret);
660 578
661 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 579 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
662 if (ret) 580 if (ret)
663 ath10k_warn("Monitor vdev %i stop failed: %d\n", 581 ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
664 ar->monitor_vdev_id, ret); 582 ar->monitor_vdev_id, ret);
665 583
666 ret = ath10k_vdev_setup_sync(ar); 584 ret = ath10k_vdev_setup_sync(ar);
667 if (ret) 585 if (ret)
668 ath10k_warn("Monitor_down sync failed, vdev %i: %d\n", 586 ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
669 ar->monitor_vdev_id, ret); 587 ar->monitor_vdev_id, ret);
670 588
671 ar->monitor_enabled = false; 589 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
590 ar->monitor_vdev_id);
672 return ret; 591 return ret;
673} 592}
674 593
675static int ath10k_monitor_create(struct ath10k *ar) 594static int ath10k_monitor_vdev_create(struct ath10k *ar)
676{ 595{
677 int bit, ret = 0; 596 int bit, ret = 0;
678 597
679 lockdep_assert_held(&ar->conf_mutex); 598 lockdep_assert_held(&ar->conf_mutex);
680 599
681 if (ar->monitor_present) {
682 ath10k_warn("Monitor mode already enabled\n");
683 return 0;
684 }
685
686 bit = ffs(ar->free_vdev_map); 600 bit = ffs(ar->free_vdev_map);
687 if (bit == 0) { 601 if (bit == 0) {
688 ath10k_warn("No free VDEV slots\n"); 602 ath10k_warn("failed to find free vdev id for monitor vdev\n");
689 return -ENOMEM; 603 return -ENOMEM;
690 } 604 }
691 605
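
Monitor vdev id allocation above hinges on ffs() over the free_vdev_map bitmask: a return of N means bit N-1 is the lowest free slot, and 0 means the map is exhausted. A standalone sketch of that allocator:

#include <stdio.h>
#include <strings.h>    /* ffs() */

static unsigned int free_map = 0xff;    /* 8 free slots */

static int alloc_slot(void)
{
        int bit = ffs(free_map);

        if (bit == 0)
                return -1;              /* no free slots (-ENOMEM) */
        free_map &= ~(1u << (bit - 1)); /* claim it */
        return bit - 1;
}

static void free_slot(int id)
{
        free_map |= 1u << id;           /* like free_vdev_map |= ... */
}

int main(void)
{
        int id = alloc_slot();

        printf("got slot %d\n", id);    /* 0 */
        free_slot(id);
        return 0;
}
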
@@ -696,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
696 WMI_VDEV_TYPE_MONITOR, 610 WMI_VDEV_TYPE_MONITOR,
697 0, ar->mac_addr); 611 0, ar->mac_addr);
698 if (ret) { 612 if (ret) {
699 ath10k_warn("WMI vdev %i monitor create failed: ret %d\n", 613 ath10k_warn("failed to request monitor vdev %i creation: %d\n",
700 ar->monitor_vdev_id, ret); 614 ar->monitor_vdev_id, ret);
701 goto vdev_fail; 615 goto vdev_fail;
702 } 616 }
@@ -704,7 +618,6 @@ static int ath10k_monitor_create(struct ath10k *ar)
704 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 618 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
705 ar->monitor_vdev_id); 619 ar->monitor_vdev_id);
706 620
707 ar->monitor_present = true;
708 return 0; 621 return 0;
709 622
710vdev_fail: 623vdev_fail:
@@ -715,48 +628,123 @@ vdev_fail:
715 return ret; 628 return ret;
716} 629}
717 630
718static int ath10k_monitor_destroy(struct ath10k *ar) 631static int ath10k_monitor_vdev_delete(struct ath10k *ar)
719{ 632{
720 int ret = 0; 633 int ret = 0;
721 634
722 lockdep_assert_held(&ar->conf_mutex); 635 lockdep_assert_held(&ar->conf_mutex);
723 636
724 if (!ar->monitor_present)
725 return 0;
726
727 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 637 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
728 if (ret) { 638 if (ret) {
729 ath10k_warn("WMI vdev %i monitor delete failed: %d\n", 639 ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
730 ar->monitor_vdev_id, ret); 640 ar->monitor_vdev_id, ret);
731 return ret; 641 return ret;
732 } 642 }
733 643
734 ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); 644 ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
735 ar->monitor_present = false;
736 645
737 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 646 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
738 ar->monitor_vdev_id); 647 ar->monitor_vdev_id);
739 return ret; 648 return ret;
740} 649}
741 650
742static int ath10k_start_cac(struct ath10k *ar) 651static int ath10k_monitor_start(struct ath10k *ar)
743{ 652{
744 int ret; 653 int ret;
745 654
746 lockdep_assert_held(&ar->conf_mutex); 655 lockdep_assert_held(&ar->conf_mutex);
747 656
748 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 657 if (!ath10k_monitor_is_enabled(ar)) {
658 ath10k_warn("trying to start monitor with no references\n");
659 return 0;
660 }
661
662 if (ar->monitor_started) {
663 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
664 return 0;
665 }
749 666
750 ret = ath10k_monitor_create(ar); 667 ret = ath10k_monitor_vdev_create(ar);
751 if (ret) { 668 if (ret) {
752 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 669 ath10k_warn("failed to create monitor vdev: %d\n", ret);
753 return ret; 670 return ret;
754 } 671 }
755 672
756 ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); 673 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
674 if (ret) {
675 ath10k_warn("failed to start monitor vdev: %d\n", ret);
676 ath10k_monitor_vdev_delete(ar);
677 return ret;
678 }
679
680 ar->monitor_started = true;
681 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
682
683 return 0;
684}
685
686static void ath10k_monitor_stop(struct ath10k *ar)
687{
688 int ret;
689
690 lockdep_assert_held(&ar->conf_mutex);
691
692 if (ath10k_monitor_is_enabled(ar)) {
693 ath10k_dbg(ATH10K_DBG_MAC,
694 "mac monitor will be stopped later\n");
695 return;
696 }
697
698 if (!ar->monitor_started) {
699 ath10k_dbg(ATH10K_DBG_MAC,
700 "mac monitor probably failed to start earlier\n");
701 return;
702 }
703
704 ret = ath10k_monitor_vdev_stop(ar);
705 if (ret)
706 ath10k_warn("failed to stop monitor vdev: %d\n", ret);
707
708 ret = ath10k_monitor_vdev_delete(ar);
709 if (ret)
710 ath10k_warn("failed to delete monitor vdev: %d\n", ret);
711
712 ar->monitor_started = false;
713 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
714}
715
716static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
717{
718 struct ath10k *ar = arvif->ar;
719 u32 vdev_param, rts_cts = 0;
720
721 lockdep_assert_held(&ar->conf_mutex);
722
723 vdev_param = ar->wmi.vdev_param->enable_rtscts;
724
725 if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
726 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
727
728 if (arvif->num_legacy_stations > 0)
729 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
730 WMI_RTSCTS_PROFILE);
731
732 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
733 rts_cts);
734}
735
736static int ath10k_start_cac(struct ath10k *ar)
737{
738 int ret;
739
740 lockdep_assert_held(&ar->conf_mutex);
741
742 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
743
744 ret = ath10k_monitor_start(ar);
757 if (ret) { 745 if (ret) {
746 ath10k_warn("failed to start monitor (cac): %d\n", ret);
758 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 747 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
759 ath10k_monitor_destroy(ar);
760 return ret; 748 return ret;
761 } 749 }
762 750
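
The rts_cts word in ath10k_recalc_rtscts_prot() above is composed with the ath-style SM() shift-and-mask helper, which in these drivers is conventionally built from per-field _LSB/_MASK constants, roughly as sketched below. The field layout and the two WMI values here are invented placeholders, not the real definitions:

#include <stdint.h>
#include <stdio.h>

/* assumed layout: SET in bits 0-3, PROFILE in bits 4-7 */
#define WMI_RTSCTS_SET_LSB      0
#define WMI_RTSCTS_SET_MASK     0x0000000f
#define WMI_RTSCTS_PROFILE_LSB  4
#define WMI_RTSCTS_PROFILE_MASK 0x000000f0

#define SM(v, f) (((v) << f##_LSB) & f##_MASK)  /* store field */
#define MS(v, f) (((v) & f##_MASK) >> f##_LSB)  /* read field */

#define WMI_RTSCTS_ENABLED           1
#define WMI_RTSCTS_ACROSS_SW_RETRIES 2

int main(void)
{
        uint32_t rts_cts = 0;

        rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
        rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, WMI_RTSCTS_PROFILE);

        printf("rts_cts=0x%x set=%u profile=%u\n", rts_cts,
               MS(rts_cts, WMI_RTSCTS_SET),
               MS(rts_cts, WMI_RTSCTS_PROFILE));
        return 0;
}
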
@@ -774,58 +762,26 @@ static int ath10k_stop_cac(struct ath10k *ar)
774 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 762 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
775 return 0; 763 return 0;
776 764
777 ath10k_monitor_stop(ar);
778 ath10k_monitor_destroy(ar);
779 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 765 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
766 ath10k_monitor_stop(ar);
780 767
781 ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n"); 768 ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
782 769
783 return 0; 770 return 0;
784} 771}
785 772
786static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state) 773static void ath10k_recalc_radar_detection(struct ath10k *ar)
787{ 774{
788 switch (dfs_state) {
789 case NL80211_DFS_USABLE:
790 return "USABLE";
791 case NL80211_DFS_UNAVAILABLE:
792 return "UNAVAILABLE";
793 case NL80211_DFS_AVAILABLE:
794 return "AVAILABLE";
795 default:
796 WARN_ON(1);
797 return "bug";
798 }
799}
800
801static void ath10k_config_radar_detection(struct ath10k *ar)
802{
803 struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
804 bool radar = ar->hw->conf.radar_enabled;
805 bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
806 enum nl80211_dfs_state dfs_state = chan->dfs_state;
807 int ret; 775 int ret;
808 776
809 lockdep_assert_held(&ar->conf_mutex); 777 lockdep_assert_held(&ar->conf_mutex);
810 778
811 ath10k_dbg(ATH10K_DBG_MAC,
812 "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
813 chan->center_freq, radar, chan_radar,
814 ath10k_dfs_state(dfs_state));
815
816 /*
817 * It's safe to call it even if CAC is not started.
818 * This call here guarantees changing channel, etc. will stop CAC.
819 */
820 ath10k_stop_cac(ar); 779 ath10k_stop_cac(ar);
821 780
822 if (!radar) 781 if (!ar->radar_enabled)
823 return;
824
825 if (!chan_radar)
826 return; 782 return;
827 783
828 if (dfs_state != NL80211_DFS_USABLE) 784 if (ar->num_started_vdevs > 0)
829 return; 785 return;
830 786
831 ret = ath10k_start_cac(ar); 787 ret = ath10k_start_cac(ar);
@@ -835,11 +791,106 @@ static void ath10k_config_radar_detection(struct ath10k *ar)
835 * radiation is not allowed, make this channel DFS_UNAVAILABLE 791 * radiation is not allowed, make this channel DFS_UNAVAILABLE
836 * by indicating that radar was detected. 792 * by indicating that radar was detected.
837 */ 793 */
838 ath10k_warn("failed to start CAC (%d)\n", ret); 794 ath10k_warn("failed to start CAC: %d\n", ret);
839 ieee80211_radar_detected(ar->hw); 795 ieee80211_radar_detected(ar->hw);
840 } 796 }
841} 797}
842 798
799static int ath10k_vdev_start(struct ath10k_vif *arvif)
800{
801 struct ath10k *ar = arvif->ar;
802 struct cfg80211_chan_def *chandef = &ar->chandef;
803 struct wmi_vdev_start_request_arg arg = {};
804 int ret = 0;
805
806 lockdep_assert_held(&ar->conf_mutex);
807
808 reinit_completion(&ar->vdev_setup_done);
809
810 arg.vdev_id = arvif->vdev_id;
811 arg.dtim_period = arvif->dtim_period;
812 arg.bcn_intval = arvif->beacon_interval;
813
814 arg.channel.freq = chandef->chan->center_freq;
815 arg.channel.band_center_freq1 = chandef->center_freq1;
816 arg.channel.mode = chan_to_phymode(chandef);
817
818 arg.channel.min_power = 0;
819 arg.channel.max_power = chandef->chan->max_power * 2;
820 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
821 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
822
823 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
824 arg.ssid = arvif->u.ap.ssid;
825 arg.ssid_len = arvif->u.ap.ssid_len;
826 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
827
828 /* For now allow DFS for AP mode */
829 arg.channel.chan_radar =
830 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
831 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
832 arg.ssid = arvif->vif->bss_conf.ssid;
833 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
834 }
835
836 ath10k_dbg(ATH10K_DBG_MAC,
837 "mac vdev %d start center_freq %d phymode %s\n",
838 arg.vdev_id, arg.channel.freq,
839 ath10k_wmi_phymode_str(arg.channel.mode));
840
841 ret = ath10k_wmi_vdev_start(ar, &arg);
842 if (ret) {
843 ath10k_warn("failed to start WMI vdev %i: %d\n",
844 arg.vdev_id, ret);
845 return ret;
846 }
847
848 ret = ath10k_vdev_setup_sync(ar);
849 if (ret) {
850 ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
851 arg.vdev_id, ret);
852 return ret;
853 }
854
855 ar->num_started_vdevs++;
856 ath10k_recalc_radar_detection(ar);
857
858 return ret;
859}
860
861static int ath10k_vdev_stop(struct ath10k_vif *arvif)
862{
863 struct ath10k *ar = arvif->ar;
864 int ret;
865
866 lockdep_assert_held(&ar->conf_mutex);
867
868 reinit_completion(&ar->vdev_setup_done);
869
870 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
871 if (ret) {
872 ath10k_warn("failed to stop WMI vdev %i: %d\n",
873 arvif->vdev_id, ret);
874 return ret;
875 }
876
877 ret = ath10k_vdev_setup_sync(ar);
878 if (ret) {
879 ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
880 arvif->vdev_id, ret);
881 return ret;
882 }
883
884 WARN_ON(ar->num_started_vdevs == 0);
885
886 if (ar->num_started_vdevs != 0) {
887 ar->num_started_vdevs--;
888 ath10k_recalc_radar_detection(ar);
889 }
890
891 return ret;
892}
893
843static void ath10k_control_beaconing(struct ath10k_vif *arvif, 894static void ath10k_control_beaconing(struct ath10k_vif *arvif,
844 struct ieee80211_bss_conf *info) 895 struct ieee80211_bss_conf *info)
845{ 896{
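
ath10k_vdev_start() and ath10k_vdev_stop() above now keep a num_started_vdevs count and re-evaluate radar detection on every transition, with a WARN_ON() guarding against underflow on the stop side; CAC is only attempted while no vdev has been started. A sketch of the counting-plus-recalc shape:

#include <assert.h>
#include <stdio.h>

static int num_started;
static int radar_enabled = 1;

static void recalc_radar(void)
{
        /* CAC is only worth running while nothing transmits yet */
        if (radar_enabled && num_started == 0)
                printf("starting CAC\n");
        else
                printf("CAC not needed\n");
}

static void vdev_start(void)
{
        num_started++;
        recalc_radar();
}

static void vdev_stop(void)
{
        assert(num_started > 0);        /* WARN_ON() in the driver */
        if (num_started != 0) {
                num_started--;
                recalc_radar();
        }
}

int main(void)
{
        vdev_start();   /* CAC not needed */
        vdev_stop();    /* starting CAC */
        return 0;
}
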
@@ -880,7 +931,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
880 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 931 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
881 arvif->bssid); 932 arvif->bssid);
882 if (ret) { 933 if (ret) {
883 ath10k_warn("Failed to bring up vdev %d: %i\n", 934 ath10k_warn("failed to bring up vdev %d: %i\n",
884 arvif->vdev_id, ret); 935 arvif->vdev_id, ret);
885 ath10k_vdev_stop(arvif); 936 ath10k_vdev_stop(arvif);
886 return; 937 return;
@@ -904,7 +955,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
904 if (!info->ibss_joined) { 955 if (!info->ibss_joined) {
905 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); 956 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
906 if (ret) 957 if (ret)
907 ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n", 958 ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
908 self_peer, arvif->vdev_id, ret); 959 self_peer, arvif->vdev_id, ret);
909 960
910 if (is_zero_ether_addr(arvif->bssid)) 961 if (is_zero_ether_addr(arvif->bssid))
@@ -913,7 +964,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
913 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, 964 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
914 arvif->bssid); 965 arvif->bssid);
915 if (ret) { 966 if (ret) {
916 ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n", 967 ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
917 arvif->bssid, arvif->vdev_id, ret); 968 arvif->bssid, arvif->vdev_id, ret);
918 return; 969 return;
919 } 970 }
@@ -925,7 +976,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
925 976
926 ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); 977 ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
927 if (ret) { 978 if (ret) {
928 ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n", 979 ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
929 self_peer, arvif->vdev_id, ret); 980 self_peer, arvif->vdev_id, ret);
930 return; 981 return;
931 } 982 }
@@ -934,7 +985,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
934 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 985 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
935 ATH10K_DEFAULT_ATIM); 986 ATH10K_DEFAULT_ATIM);
936 if (ret) 987 if (ret)
937 ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", 988 ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
938 arvif->vdev_id, ret); 989 arvif->vdev_id, ret);
939} 990}
940 991
@@ -961,7 +1012,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
961 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1012 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
962 conf->dynamic_ps_timeout); 1013 conf->dynamic_ps_timeout);
963 if (ret) { 1014 if (ret) {
964 ath10k_warn("Failed to set inactivity time for vdev %d: %i\n", 1015 ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
965 arvif->vdev_id, ret); 1016 arvif->vdev_id, ret);
966 return ret; 1017 return ret;
967 } 1018 }
@@ -974,8 +1025,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
974 1025
975 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1026 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
976 if (ret) { 1027 if (ret) {
977 ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", 1028 ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
978 psmode, arvif->vdev_id); 1029 psmode, arvif->vdev_id, ret);
979 return ret; 1030 return ret;
980 } 1031 }
981 1032
@@ -1429,7 +1480,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1429 1480
1430 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 1481 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
1431 if (!ap_sta) { 1482 if (!ap_sta) {
1432 ath10k_warn("Failed to find station entry for %pM, vdev %i\n", 1483 ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
1433 bss_conf->bssid, arvif->vdev_id); 1484 bss_conf->bssid, arvif->vdev_id);
1434 rcu_read_unlock(); 1485 rcu_read_unlock();
1435 return; 1486 return;
@@ -1442,7 +1493,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1442 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, 1493 ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
1443 bss_conf, &peer_arg); 1494 bss_conf, &peer_arg);
1444 if (ret) { 1495 if (ret) {
1445 ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d", 1496 ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
1446 bss_conf->bssid, arvif->vdev_id, ret); 1497 bss_conf->bssid, arvif->vdev_id, ret);
1447 rcu_read_unlock(); 1498 rcu_read_unlock();
1448 return; 1499 return;
@@ -1452,7 +1503,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1452 1503
1453 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 1504 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1454 if (ret) { 1505 if (ret) {
1455 ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d", 1506 ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
1456 bss_conf->bssid, arvif->vdev_id, ret); 1507 bss_conf->bssid, arvif->vdev_id, ret);
1457 return; 1508 return;
1458 } 1509 }
@@ -1473,7 +1524,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
1473 1524
1474 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 1525 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
1475 if (ret) { 1526 if (ret) {
1476 ath10k_warn("VDEV: %d up failed: ret %d\n", 1527 ath10k_warn("failed to set vdev %d up: %d\n",
1477 arvif->vdev_id, ret); 1528 arvif->vdev_id, ret);
1478 return; 1529 return;
1479 } 1530 }
@@ -1524,7 +1575,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1524} 1575}
1525 1576
1526static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, 1577static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
1527 struct ieee80211_sta *sta) 1578 struct ieee80211_sta *sta, bool reassoc)
1528{ 1579{
1529 struct wmi_peer_assoc_complete_arg peer_arg; 1580 struct wmi_peer_assoc_complete_arg peer_arg;
1530 int ret = 0; 1581 int ret = 0;
@@ -1533,34 +1584,46 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
1533 1584
1534 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); 1585 ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
1535 if (ret) { 1586 if (ret) {
1536 ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n", 1587 ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
1537 sta->addr, arvif->vdev_id, ret); 1588 sta->addr, arvif->vdev_id, ret);
1538 return ret; 1589 return ret;
1539 } 1590 }
1540 1591
1592 peer_arg.peer_reassoc = reassoc;
1541 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 1593 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
1542 if (ret) { 1594 if (ret) {
1543 ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n", 1595 ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
1544 sta->addr, arvif->vdev_id, ret); 1596 sta->addr, arvif->vdev_id, ret);
1545 return ret; 1597 return ret;
1546 } 1598 }
1547 1599
1548 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); 1600 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
1549 if (ret) { 1601 if (ret) {
1550 ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret); 1602 ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
1603 arvif->vdev_id, ret);
1551 return ret; 1604 return ret;
1552 } 1605 }
1553 1606
1607 if (!sta->wme) {
1608 arvif->num_legacy_stations++;
1609 ret = ath10k_recalc_rtscts_prot(arvif);
1610 if (ret) {
1611 ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
1612 arvif->vdev_id, ret);
1613 return ret;
1614 }
1615 }
1616
1554 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 1617 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
1555 if (ret) { 1618 if (ret) {
1556 ath10k_warn("could not install peer wep keys for vdev %i: %d\n", 1619 ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
1557 arvif->vdev_id, ret); 1620 arvif->vdev_id, ret);
1558 return ret; 1621 return ret;
1559 } 1622 }
1560 1623
1561 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 1624 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
1562 if (ret) { 1625 if (ret) {
1563 ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n", 1626 ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
1564 sta->addr, arvif->vdev_id, ret); 1627 sta->addr, arvif->vdev_id, ret);
1565 return ret; 1628 return ret;
1566 } 1629 }
@@ -1575,9 +1638,19 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
1575 1638
1576 lockdep_assert_held(&ar->conf_mutex); 1639 lockdep_assert_held(&ar->conf_mutex);
1577 1640
1641 if (!sta->wme) {
1642 arvif->num_legacy_stations--;
1643 ret = ath10k_recalc_rtscts_prot(arvif);
1644 if (ret) {
1645 ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
1646 arvif->vdev_id, ret);
1647 return ret;
1648 }
1649 }
1650
1578 ret = ath10k_clear_peer_keys(arvif, sta->addr); 1651 ret = ath10k_clear_peer_keys(arvif, sta->addr);
1579 if (ret) { 1652 if (ret) {
1580 ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n", 1653 ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
1581 arvif->vdev_id, ret); 1654 arvif->vdev_id, ret);
1582 return ret; 1655 return ret;
1583 } 1656 }
@@ -1685,19 +1758,44 @@ static int ath10k_update_channel_list(struct ath10k *ar)
1685 return ret; 1758 return ret;
1686} 1759}
1687 1760
1761static enum wmi_dfs_region
1762ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
1763{
1764 switch (dfs_region) {
1765 case NL80211_DFS_UNSET:
1766 return WMI_UNINIT_DFS_DOMAIN;
1767 case NL80211_DFS_FCC:
1768 return WMI_FCC_DFS_DOMAIN;
1769 case NL80211_DFS_ETSI:
1770 return WMI_ETSI_DFS_DOMAIN;
1771 case NL80211_DFS_JP:
1772 return WMI_MKK4_DFS_DOMAIN;
1773 }
1774 return WMI_UNINIT_DFS_DOMAIN;
1775}
1776
1688static void ath10k_regd_update(struct ath10k *ar) 1777static void ath10k_regd_update(struct ath10k *ar)
1689{ 1778{
1690 struct reg_dmn_pair_mapping *regpair; 1779 struct reg_dmn_pair_mapping *regpair;
1691 int ret; 1780 int ret;
1781 enum wmi_dfs_region wmi_dfs_reg;
1782 enum nl80211_dfs_regions nl_dfs_reg;
1692 1783
1693 lockdep_assert_held(&ar->conf_mutex); 1784 lockdep_assert_held(&ar->conf_mutex);
1694 1785
1695 ret = ath10k_update_channel_list(ar); 1786 ret = ath10k_update_channel_list(ar);
1696 if (ret) 1787 if (ret)
1697 ath10k_warn("could not update channel list (%d)\n", ret); 1788 ath10k_warn("failed to update channel list: %d\n", ret);
1698 1789
1699 regpair = ar->ath_common.regulatory.regpair; 1790 regpair = ar->ath_common.regulatory.regpair;
1700 1791
1792 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
1793 nl_dfs_reg = ar->dfs_detector->region;
1794 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
1795 } else {
1796 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
1797 }
1798
1701 /* Target allows setting up per-band regdomain but ath_common provides 1799 /* Target allows setting up per-band regdomain but ath_common provides
1702 * a combined one only */ 1800 * a combined one only */
1703 ret = ath10k_wmi_pdev_set_regdomain(ar, 1801 ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1705,9 +1803,10 @@ static void ath10k_regd_update(struct ath10k *ar)
1705 regpair->reg_domain, /* 2ghz */ 1803 regpair->reg_domain, /* 2ghz */
1706 regpair->reg_domain, /* 5ghz */ 1804 regpair->reg_domain, /* 5ghz */
1707 regpair->reg_2ghz_ctl, 1805 regpair->reg_2ghz_ctl,
1708 regpair->reg_5ghz_ctl); 1806 regpair->reg_5ghz_ctl,
1807 wmi_dfs_reg);
1709 if (ret) 1808 if (ret)
1710 ath10k_warn("could not set pdev regdomain (%d)\n", ret); 1809 ath10k_warn("failed to set pdev regdomain: %d\n", ret);
1711} 1810}
1712 1811
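
ath10k_mac_get_dfs_region(), added above, is a plain total mapping from the nl80211 DFS region enum onto the firmware's WMI domain codes, with WMI_UNINIT_DFS_DOMAIN as the catch-all; regd_update() only consults it when the DFS detector is compiled in and present. The same shape in miniature (enum values here are invented):

#include <stdio.h>

enum nl_dfs  { NL_DFS_UNSET, NL_DFS_FCC, NL_DFS_ETSI, NL_DFS_JP };
enum wmi_dfs { WMI_DFS_UNINIT, WMI_DFS_FCC, WMI_DFS_ETSI, WMI_DFS_MKK4 };

static enum wmi_dfs get_dfs_region(enum nl_dfs r)
{
        switch (r) {
        case NL_DFS_FCC:
                return WMI_DFS_FCC;
        case NL_DFS_ETSI:
                return WMI_DFS_ETSI;
        case NL_DFS_JP:
                return WMI_DFS_MKK4;
        case NL_DFS_UNSET:
        default:
                return WMI_DFS_UNINIT;  /* safe fallback */
        }
}

int main(void)
{
        printf("%d\n", get_dfs_region(NL_DFS_JP));  /* 3, WMI_DFS_MKK4 */
        return 0;
}
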
1713static void ath10k_reg_notifier(struct wiphy *wiphy, 1812static void ath10k_reg_notifier(struct wiphy *wiphy,
@@ -1725,7 +1824,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
1725 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 1824 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
1726 request->dfs_region); 1825 request->dfs_region);
1727 if (!result) 1826 if (!result)
1728 ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n", 1827 ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
1729 request->dfs_region); 1828 request->dfs_region);
1730 } 1829 }
1731 1830
@@ -1759,10 +1858,10 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
1759 if (info->control.vif) 1858 if (info->control.vif)
1760 return ath10k_vif_to_arvif(info->control.vif)->vdev_id; 1859 return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
1761 1860
1762 if (ar->monitor_enabled) 1861 if (ar->monitor_started)
1763 return ar->monitor_vdev_id; 1862 return ar->monitor_vdev_id;
1764 1863
1765 ath10k_warn("could not resolve vdev id\n"); 1864 ath10k_warn("failed to resolve vdev id\n");
1766 return 0; 1865 return 0;
1767} 1866}
1768 1867
@@ -1792,8 +1891,13 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
1792 wep_key_work); 1891 wep_key_work);
1793 int ret, keyidx = arvif->def_wep_key_newidx; 1892 int ret, keyidx = arvif->def_wep_key_newidx;
1794 1893
1894 mutex_lock(&arvif->ar->conf_mutex);
1895
1896 if (arvif->ar->state != ATH10K_STATE_ON)
1897 goto unlock;
1898
1795 if (arvif->def_wep_key_idx == keyidx) 1899 if (arvif->def_wep_key_idx == keyidx)
1796 return; 1900 goto unlock;
1797 1901
1798 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 1902 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
1799 arvif->vdev_id, keyidx); 1903 arvif->vdev_id, keyidx);
@@ -1803,11 +1907,16 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
1803 arvif->ar->wmi.vdev_param->def_keyid, 1907 arvif->ar->wmi.vdev_param->def_keyid,
1804 keyidx); 1908 keyidx);
1805 if (ret) { 1909 if (ret) {
1806 ath10k_warn("could not update wep keyidx (%d)\n", ret); 1910 ath10k_warn("failed to update wep key index for vdev %d: %d\n",
1807 return; 1911 arvif->vdev_id,
1912 ret);
1913 goto unlock;
1808 } 1914 }
1809 1915
1810 arvif->def_wep_key_idx = keyidx; 1916 arvif->def_wep_key_idx = keyidx;
1917
1918unlock:
1919 mutex_unlock(&arvif->ar->conf_mutex);
1811} 1920}
1812 1921
1813static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) 1922static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
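
The WEP key worker above now runs entirely under conf_mutex and bails out early unless the device is in the ON state, so a work item scheduled just before a halt can no longer touch dead hardware. A pthreads sketch of the lock, check, goto-unlock shape (state names are simplified):

#include <pthread.h>
#include <stdio.h>

enum state { STATE_OFF, STATE_ON };

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static enum state dev_state = STATE_OFF;

static void key_worker(int new_idx)
{
        pthread_mutex_lock(&conf_mutex);

        if (dev_state != STATE_ON)
                goto unlock;    /* device halted meanwhile: do nothing */

        printf("programming key index %d\n", new_idx);

unlock:
        pthread_mutex_unlock(&conf_mutex);
}

int main(void)
{
        key_worker(1);          /* skipped: device off */
        dev_state = STATE_ON;
        key_worker(1);          /* programs the key */
        return 0;
}
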
@@ -1879,7 +1988,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1879 ar->fw_features)) { 1988 ar->fw_features)) {
1880 if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= 1989 if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
1881 ATH10K_MAX_NUM_MGMT_PENDING) { 1990 ATH10K_MAX_NUM_MGMT_PENDING) {
1882 ath10k_warn("wmi mgmt_tx queue limit reached\n"); 1991 ath10k_warn("reached WMI management tranmist queue limit\n");
1883 ret = -EBUSY; 1992 ret = -EBUSY;
1884 goto exit; 1993 goto exit;
1885 } 1994 }
@@ -1903,7 +2012,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
1903 2012
1904exit: 2013exit:
1905 if (ret) { 2014 if (ret) {
1906 ath10k_warn("tx failed (%d). dropping packet.\n", ret); 2015 ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
1907 ieee80211_free_txskb(ar->hw, skb); 2016 ieee80211_free_txskb(ar->hw, skb);
1908 } 2017 }
1909} 2018}
@@ -1964,7 +2073,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1964 if (!peer) { 2073 if (!peer) {
1965 ret = ath10k_peer_create(ar, vdev_id, peer_addr); 2074 ret = ath10k_peer_create(ar, vdev_id, peer_addr);
1966 if (ret) 2075 if (ret)
1967 ath10k_warn("peer %pM on vdev %d not created (%d)\n", 2076 ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
1968 peer_addr, vdev_id, ret); 2077 peer_addr, vdev_id, ret);
1969 } 2078 }
1970 2079
@@ -1984,7 +2093,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
1984 if (!peer) { 2093 if (!peer) {
1985 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 2094 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
1986 if (ret) 2095 if (ret)
1987 ath10k_warn("peer %pM on vdev %d not deleted (%d)\n", 2096 ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
1988 peer_addr, vdev_id, ret); 2097 peer_addr, vdev_id, ret);
1989 } 2098 }
1990 2099
@@ -2018,7 +2127,8 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
2018 2127
2019 ret = ath10k_wmi_mgmt_tx(ar, skb); 2128 ret = ath10k_wmi_mgmt_tx(ar, skb);
2020 if (ret) { 2129 if (ret) {
2021 ath10k_warn("wmi mgmt_tx failed (%d)\n", ret); 2130 ath10k_warn("failed to transmit management frame via WMI: %d\n",
2131 ret);
2022 ieee80211_free_txskb(ar->hw, skb); 2132 ieee80211_free_txskb(ar->hw, skb);
2023 } 2133 }
2024 } 2134 }
@@ -2043,7 +2153,7 @@ void ath10k_reset_scan(unsigned long ptr)
2043 return; 2153 return;
2044 } 2154 }
2045 2155
2046 ath10k_warn("scan timeout. resetting. fw issue?\n"); 2156 ath10k_warn("scan timed out, firmware problem?\n");
2047 2157
2048 if (ar->scan.is_roc) 2158 if (ar->scan.is_roc)
2049 ieee80211_remain_on_channel_expired(ar->hw); 2159 ieee80211_remain_on_channel_expired(ar->hw);
@@ -2079,7 +2189,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
2079 2189
2080 ret = ath10k_wmi_stop_scan(ar, &arg); 2190 ret = ath10k_wmi_stop_scan(ar, &arg);
2081 if (ret) { 2191 if (ret) {
2082 ath10k_warn("could not submit wmi stop scan (%d)\n", ret); 2192 ath10k_warn("failed to stop wmi scan: %d\n", ret);
2083 spin_lock_bh(&ar->data_lock); 2193 spin_lock_bh(&ar->data_lock);
2084 ar->scan.in_progress = false; 2194 ar->scan.in_progress = false;
2085 ath10k_offchan_tx_purge(ar); 2195 ath10k_offchan_tx_purge(ar);
@@ -2099,7 +2209,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
2099 2209
2100 spin_lock_bh(&ar->data_lock); 2210 spin_lock_bh(&ar->data_lock);
2101 if (ar->scan.in_progress) { 2211 if (ar->scan.in_progress) {
2102 ath10k_warn("could not stop scan. its still in progress\n"); 2212 ath10k_warn("failed to stop scan, it's still in progress\n");
2103 ar->scan.in_progress = false; 2213 ar->scan.in_progress = false;
2104 ath10k_offchan_tx_purge(ar); 2214 ath10k_offchan_tx_purge(ar);
2105 ret = -ETIMEDOUT; 2215 ret = -ETIMEDOUT;
@@ -2187,72 +2297,171 @@ static void ath10k_tx(struct ieee80211_hw *hw,
2187 ath10k_tx_htt(ar, skb); 2297 ath10k_tx_htt(ar, skb);
2188} 2298}
2189 2299
2190/* 2300/* Must not be called with conf_mutex held as workers can use that also. */
2191 * Initialize various parameters with default vaules. 2301static void ath10k_drain_tx(struct ath10k *ar)
2192 */ 2302{
2303 /* make sure rcu-protected mac80211 tx path itself is drained */
2304 synchronize_net();
2305
2306 ath10k_offchan_tx_purge(ar);
2307 ath10k_mgmt_over_wmi_tx_purge(ar);
2308
2309 cancel_work_sync(&ar->offchan_tx_work);
2310 cancel_work_sync(&ar->wmi_mgmt_tx_work);
2311}
2312
2193void ath10k_halt(struct ath10k *ar) 2313void ath10k_halt(struct ath10k *ar)
2194{ 2314{
2315 struct ath10k_vif *arvif;
2316
2195 lockdep_assert_held(&ar->conf_mutex); 2317 lockdep_assert_held(&ar->conf_mutex);
2196 2318
2197 ath10k_stop_cac(ar); 2319 if (ath10k_monitor_is_enabled(ar)) {
2320 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
2321 ar->promisc = false;
2322 ar->monitor = false;
2323 ath10k_monitor_stop(ar);
2324 }
2325
2198 del_timer_sync(&ar->scan.timeout); 2326 del_timer_sync(&ar->scan.timeout);
2199 ath10k_offchan_tx_purge(ar); 2327 ath10k_reset_scan((unsigned long)ar);
2200 ath10k_mgmt_over_wmi_tx_purge(ar);
2201 ath10k_peer_cleanup_all(ar); 2328 ath10k_peer_cleanup_all(ar);
2202 ath10k_core_stop(ar); 2329 ath10k_core_stop(ar);
2203 ath10k_hif_power_down(ar); 2330 ath10k_hif_power_down(ar);
2204 2331
2205 spin_lock_bh(&ar->data_lock); 2332 spin_lock_bh(&ar->data_lock);
2206 if (ar->scan.in_progress) { 2333 list_for_each_entry(arvif, &ar->arvifs, list) {
2207 del_timer(&ar->scan.timeout); 2334 if (!arvif->beacon)
2208 ar->scan.in_progress = false; 2335 continue;
2209 ieee80211_scan_completed(ar->hw, true); 2336
2337 dma_unmap_single(arvif->ar->dev,
2338 ATH10K_SKB_CB(arvif->beacon)->paddr,
2339 arvif->beacon->len, DMA_TO_DEVICE);
2340 dev_kfree_skb_any(arvif->beacon);
2341 arvif->beacon = NULL;
2210 } 2342 }
2211 spin_unlock_bh(&ar->data_lock); 2343 spin_unlock_bh(&ar->data_lock);
2212} 2344}
2213 2345
2346static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
2347{
2348 struct ath10k *ar = hw->priv;
2349
2350 mutex_lock(&ar->conf_mutex);
2351
2352 if (ar->cfg_tx_chainmask) {
2353 *tx_ant = ar->cfg_tx_chainmask;
2354 *rx_ant = ar->cfg_rx_chainmask;
2355 } else {
2356 *tx_ant = ar->supp_tx_chainmask;
2357 *rx_ant = ar->supp_rx_chainmask;
2358 }
2359
2360 mutex_unlock(&ar->conf_mutex);
2361
2362 return 0;
2363}
2364
2365static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
2366{
2367 int ret;
2368
2369 lockdep_assert_held(&ar->conf_mutex);
2370
2371 ar->cfg_tx_chainmask = tx_ant;
2372 ar->cfg_rx_chainmask = rx_ant;
2373
2374 if ((ar->state != ATH10K_STATE_ON) &&
2375 (ar->state != ATH10K_STATE_RESTARTED))
2376 return 0;
2377
2378 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
2379 tx_ant);
2380 if (ret) {
2381 ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n",
2382 ret, tx_ant);
2383 return ret;
2384 }
2385
2386 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
2387 rx_ant);
2388 if (ret) {
2389 ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n",
2390 ret, rx_ant);
2391 return ret;
2392 }
2393
2394 return 0;
2395}
2396
2397static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
2398{
2399 struct ath10k *ar = hw->priv;
2400 int ret;
2401
2402 mutex_lock(&ar->conf_mutex);
2403 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
2404 mutex_unlock(&ar->conf_mutex);
2405 return ret;
2406}
2407
2214static int ath10k_start(struct ieee80211_hw *hw) 2408static int ath10k_start(struct ieee80211_hw *hw)
2215{ 2409{
2216 struct ath10k *ar = hw->priv; 2410 struct ath10k *ar = hw->priv;
2217 int ret = 0; 2411 int ret = 0;
2218 2412
2413 /*
2414 * This makes sense only when restarting hw. It is harmless to call
2415 * uncoditionally. This is necessary to make sure no HTT/WMI tx
2416 * commands will be submitted while restarting.
2417 */
2418 ath10k_drain_tx(ar);
2419
2219 mutex_lock(&ar->conf_mutex); 2420 mutex_lock(&ar->conf_mutex);
2220 2421
2221 if (ar->state != ATH10K_STATE_OFF && 2422 switch (ar->state) {
2222 ar->state != ATH10K_STATE_RESTARTING) { 2423 case ATH10K_STATE_OFF:
2424 ar->state = ATH10K_STATE_ON;
2425 break;
2426 case ATH10K_STATE_RESTARTING:
2427 ath10k_halt(ar);
2428 ar->state = ATH10K_STATE_RESTARTED;
2429 break;
2430 case ATH10K_STATE_ON:
2431 case ATH10K_STATE_RESTARTED:
2432 case ATH10K_STATE_WEDGED:
2433 WARN_ON(1);
2223 ret = -EINVAL; 2434 ret = -EINVAL;
2224 goto exit; 2435 goto err;
2225 } 2436 }
2226 2437
2227 ret = ath10k_hif_power_up(ar); 2438 ret = ath10k_hif_power_up(ar);
2228 if (ret) { 2439 if (ret) {
2229 ath10k_err("could not init hif (%d)\n", ret); 2440 ath10k_err("Could not init hif: %d\n", ret);
2230 ar->state = ATH10K_STATE_OFF; 2441 goto err_off;
2231 goto exit;
2232 } 2442 }
2233 2443
2234 ret = ath10k_core_start(ar); 2444 ret = ath10k_core_start(ar);
2235 if (ret) { 2445 if (ret) {
2236 ath10k_err("could not init core (%d)\n", ret); 2446 ath10k_err("Could not init core: %d\n", ret);
2237 ath10k_hif_power_down(ar); 2447 goto err_power_down;
2238 ar->state = ATH10K_STATE_OFF;
2239 goto exit;
2240 } 2448 }
2241 2449
2242 if (ar->state == ATH10K_STATE_OFF)
2243 ar->state = ATH10K_STATE_ON;
2244 else if (ar->state == ATH10K_STATE_RESTARTING)
2245 ar->state = ATH10K_STATE_RESTARTED;
2246
2247 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); 2450 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
2248 if (ret) 2451 if (ret) {
2249 ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", 2452 ath10k_warn("failed to enable PMF QOS: %d\n", ret);
2250 ret); 2453 goto err_core_stop;
2454 }
2251 2455
2252 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); 2456 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
2253 if (ret) 2457 if (ret) {
2254 ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", 2458 ath10k_warn("failed to enable dynamic BW: %d\n", ret);
2255 ret); 2459 goto err_core_stop;
2460 }
2461
2462 if (ar->cfg_tx_chainmask)
2463 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
2464 ar->cfg_rx_chainmask);
2256 2465
2257 /* 2466 /*
2258 * By default FW sets ARP frames ac to voice (6). In that case ARP 2467
@@ -2266,15 +2475,27 @@ static int ath10k_start(struct ieee80211_hw *hw)
2266 ret = ath10k_wmi_pdev_set_param(ar, 2475 ret = ath10k_wmi_pdev_set_param(ar,
2267 ar->wmi.pdev_param->arp_ac_override, 0); 2476 ar->wmi.pdev_param->arp_ac_override, 0);
2268 if (ret) { 2477 if (ret) {
2269 ath10k_warn("could not set arp ac override parameter: %d\n", 2478 ath10k_warn("failed to set arp ac override parameter: %d\n",
2270 ret); 2479 ret);
2271 goto exit; 2480 goto err_core_stop;
2272 } 2481 }
2273 2482
2483 ar->num_started_vdevs = 0;
2274 ath10k_regd_update(ar); 2484 ath10k_regd_update(ar);
2275 ret = 0;
2276 2485
2277exit: 2486 mutex_unlock(&ar->conf_mutex);
2487 return 0;
2488
2489err_core_stop:
2490 ath10k_core_stop(ar);
2491
2492err_power_down:
2493 ath10k_hif_power_down(ar);
2494
2495err_off:
2496 ar->state = ATH10K_STATE_OFF;
2497
2498err:
2278 mutex_unlock(&ar->conf_mutex); 2499 mutex_unlock(&ar->conf_mutex);
2279 return ret; 2500 return ret;
2280} 2501}
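
The reworked ath10k_start() above replaces the lone `exit` label with a chain of unwind labels (err_core_stop, err_power_down, err_off) so that each failure point undoes exactly what had already succeeded. A minimal stand-alone sketch of this goto-unwind idiom, using hypothetical resource names that are not from the driver:

#include <stdio.h>
#include <stdlib.h>

static int power_up(void)    { return 0; }   /* 0 on success */
static void power_down(void) { puts("power_down"); }
static int core_start(void)  { return -1; }  /* force a failure */

static int device_start(void)
{
	int ret;

	ret = power_up();
	if (ret)
		goto err;             /* nothing acquired yet */

	ret = core_start();
	if (ret)
		goto err_power_down;  /* undo power_up() only */

	return 0;

err_power_down:
	power_down();
err:
	return ret;
}

int main(void)
{
	return device_start() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The labels are ordered so that falling through from a later label runs every earlier cleanup, which is why this style always lists the most recently acquired resource's cleanup first.
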
@@ -2283,19 +2504,15 @@ static void ath10k_stop(struct ieee80211_hw *hw)
2283{ 2504{
2284 struct ath10k *ar = hw->priv; 2505 struct ath10k *ar = hw->priv;
2285 2506
2507 ath10k_drain_tx(ar);
2508
2286 mutex_lock(&ar->conf_mutex); 2509 mutex_lock(&ar->conf_mutex);
2287 if (ar->state == ATH10K_STATE_ON || 2510 if (ar->state != ATH10K_STATE_OFF) {
2288 ar->state == ATH10K_STATE_RESTARTED ||
2289 ar->state == ATH10K_STATE_WEDGED)
2290 ath10k_halt(ar); 2511 ath10k_halt(ar);
2291 2512 ar->state = ATH10K_STATE_OFF;
2292 ar->state = ATH10K_STATE_OFF; 2513 }
2293 mutex_unlock(&ar->conf_mutex); 2514 mutex_unlock(&ar->conf_mutex);
2294 2515
2295 ath10k_mgmt_over_wmi_tx_purge(ar);
2296
2297 cancel_work_sync(&ar->offchan_tx_work);
2298 cancel_work_sync(&ar->wmi_mgmt_tx_work);
2299 cancel_work_sync(&ar->restart_work); 2516 cancel_work_sync(&ar->restart_work);
2300} 2517}
2301 2518
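
ath10k_stop() now drains the tx machinery before taking conf_mutex, matching the constraint the comment on ath10k_drain_tx() spells out: the workers themselves may take that mutex. A minimal kernel-style sketch of the ordering, using a hypothetical demo worker rather than the driver's real work items:

#include <linux/workqueue.h>

static void demo_tx_worker(struct work_struct *work)
{
	/* submits queued frames; may take the driver's conf mutex */
}

static DECLARE_WORK(demo_tx_work, demo_tx_worker);

static void demo_drain_tx(void)
{
	/* must be called without holding the mutex the worker takes,
	 * otherwise cancel_work_sync() can deadlock waiting for it */
	cancel_work_sync(&demo_tx_work);
}
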
@@ -2309,7 +2526,7 @@ static int ath10k_config_ps(struct ath10k *ar)
2309 list_for_each_entry(arvif, &ar->arvifs, list) { 2526 list_for_each_entry(arvif, &ar->arvifs, list) {
2310 ret = ath10k_mac_vif_setup_ps(arvif); 2527 ret = ath10k_mac_vif_setup_ps(arvif);
2311 if (ret) { 2528 if (ret) {
2312 ath10k_warn("could not setup powersave (%d)\n", ret); 2529 ath10k_warn("failed to setup powersave: %d\n", ret);
2313 break; 2530 break;
2314 } 2531 }
2315 } 2532 }
@@ -2343,7 +2560,6 @@ static const char *chandef_get_width(enum nl80211_chan_width width)
2343static void ath10k_config_chan(struct ath10k *ar) 2560static void ath10k_config_chan(struct ath10k *ar)
2344{ 2561{
2345 struct ath10k_vif *arvif; 2562 struct ath10k_vif *arvif;
2346 bool monitor_was_enabled;
2347 int ret; 2563 int ret;
2348 2564
2349 lockdep_assert_held(&ar->conf_mutex); 2565 lockdep_assert_held(&ar->conf_mutex);
@@ -2357,10 +2573,8 @@ static void ath10k_config_chan(struct ath10k *ar)
2357 2573
2358 /* First stop monitor interface. Some FW versions crash if there's a 2574 /* First stop monitor interface. Some FW versions crash if there's a
2359 * lone monitor interface. */ 2575 * lone monitor interface. */
2360 monitor_was_enabled = ar->monitor_enabled; 2576 if (ar->monitor_started)
2361 2577 ath10k_monitor_vdev_stop(ar);
2362 if (ar->monitor_enabled)
2363 ath10k_monitor_stop(ar);
2364 2578
2365 list_for_each_entry(arvif, &ar->arvifs, list) { 2579 list_for_each_entry(arvif, &ar->arvifs, list) {
2366 if (!arvif->is_started) 2580 if (!arvif->is_started)
@@ -2371,7 +2585,7 @@ static void ath10k_config_chan(struct ath10k *ar)
2371 2585
2372 ret = ath10k_vdev_stop(arvif); 2586 ret = ath10k_vdev_stop(arvif);
2373 if (ret) { 2587 if (ret) {
2374 ath10k_warn("could not stop vdev %d (%d)\n", 2588 ath10k_warn("failed to stop vdev %d: %d\n",
2375 arvif->vdev_id, ret); 2589 arvif->vdev_id, ret);
2376 continue; 2590 continue;
2377 } 2591 }
@@ -2388,7 +2602,7 @@ static void ath10k_config_chan(struct ath10k *ar)
2388 2602
2389 ret = ath10k_vdev_start(arvif); 2603 ret = ath10k_vdev_start(arvif);
2390 if (ret) { 2604 if (ret) {
2391 ath10k_warn("could not start vdev %d (%d)\n", 2605 ath10k_warn("failed to start vdev %d: %d\n",
2392 arvif->vdev_id, ret); 2606 arvif->vdev_id, ret);
2393 continue; 2607 continue;
2394 } 2608 }
@@ -2399,14 +2613,14 @@ static void ath10k_config_chan(struct ath10k *ar)
2399 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 2613 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
2400 arvif->bssid); 2614 arvif->bssid);
2401 if (ret) { 2615 if (ret) {
2402 ath10k_warn("could not bring vdev up %d (%d)\n", 2616 ath10k_warn("failed to bring vdev up %d: %d\n",
2403 arvif->vdev_id, ret); 2617 arvif->vdev_id, ret);
2404 continue; 2618 continue;
2405 } 2619 }
2406 } 2620 }
2407 2621
2408 if (monitor_was_enabled) 2622 if (ath10k_monitor_is_enabled(ar))
2409 ath10k_monitor_start(ar, ar->monitor_vdev_id); 2623 ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
2410} 2624}
2411 2625
2412static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 2626static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2420,15 +2634,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2420 2634
2421 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 2635 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2422 ath10k_dbg(ATH10K_DBG_MAC, 2636 ath10k_dbg(ATH10K_DBG_MAC,
2423 "mac config channel %d mhz flags 0x%x\n", 2637 "mac config channel %dMHz flags 0x%x radar %d\n",
2424 conf->chandef.chan->center_freq, 2638 conf->chandef.chan->center_freq,
2425 conf->chandef.chan->flags); 2639 conf->chandef.chan->flags,
2640 conf->radar_enabled);
2426 2641
2427 spin_lock_bh(&ar->data_lock); 2642 spin_lock_bh(&ar->data_lock);
2428 ar->rx_channel = conf->chandef.chan; 2643 ar->rx_channel = conf->chandef.chan;
2429 spin_unlock_bh(&ar->data_lock); 2644 spin_unlock_bh(&ar->data_lock);
2430 2645
2431 ath10k_config_radar_detection(ar); 2646 ar->radar_enabled = conf->radar_enabled;
2647 ath10k_recalc_radar_detection(ar);
2432 2648
2433 if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) { 2649 if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
2434 ar->chandef = conf->chandef; 2650 ar->chandef = conf->chandef;
@@ -2444,14 +2660,14 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2444 ret = ath10k_wmi_pdev_set_param(ar, param, 2660 ret = ath10k_wmi_pdev_set_param(ar, param,
2445 hw->conf.power_level * 2); 2661 hw->conf.power_level * 2);
2446 if (ret) 2662 if (ret)
2447 ath10k_warn("mac failed to set 2g txpower %d (%d)\n", 2663 ath10k_warn("failed to set 2g txpower %d: %d\n",
2448 hw->conf.power_level, ret); 2664 hw->conf.power_level, ret);
2449 2665
2450 param = ar->wmi.pdev_param->txpower_limit5g; 2666 param = ar->wmi.pdev_param->txpower_limit5g;
2451 ret = ath10k_wmi_pdev_set_param(ar, param, 2667 ret = ath10k_wmi_pdev_set_param(ar, param,
2452 hw->conf.power_level * 2); 2668 hw->conf.power_level * 2);
2453 if (ret) 2669 if (ret)
2454 ath10k_warn("mac failed to set 5g txpower %d (%d)\n", 2670 ath10k_warn("failed to set 5g txpower %d: %d\n",
2455 hw->conf.power_level, ret); 2671 hw->conf.power_level, ret);
2456 } 2672 }
2457 2673
@@ -2459,10 +2675,19 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
2459 ath10k_config_ps(ar); 2675 ath10k_config_ps(ar);
2460 2676
2461 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 2677 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
2462 if (conf->flags & IEEE80211_CONF_MONITOR) 2678 if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
2463 ret = ath10k_monitor_create(ar); 2679 ar->monitor = true;
2464 else 2680 ret = ath10k_monitor_start(ar);
2465 ret = ath10k_monitor_destroy(ar); 2681 if (ret) {
2682 ath10k_warn("failed to start monitor (config): %d\n",
2683 ret);
2684 ar->monitor = false;
2685 }
2686 } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
2687 ar->monitor) {
2688 ar->monitor = false;
2689 ath10k_monitor_stop(ar);
2690 }
2466 } 2691 }
2467 2692
2468 mutex_unlock(&ar->conf_mutex); 2693 mutex_unlock(&ar->conf_mutex);
@@ -2497,12 +2722,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2497 INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); 2722 INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
2498 INIT_LIST_HEAD(&arvif->list); 2723 INIT_LIST_HEAD(&arvif->list);
2499 2724
2500 if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
2501 ath10k_warn("Only one monitor interface allowed\n");
2502 ret = -EBUSY;
2503 goto err;
2504 }
2505
2506 bit = ffs(ar->free_vdev_map); 2725 bit = ffs(ar->free_vdev_map);
2507 if (bit == 0) { 2726 if (bit == 0) {
2508 ret = -EBUSY; 2727 ret = -EBUSY;
@@ -2545,7 +2764,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2545 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 2764 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
2546 arvif->vdev_subtype, vif->addr); 2765 arvif->vdev_subtype, vif->addr);
2547 if (ret) { 2766 if (ret) {
2548 ath10k_warn("WMI vdev %i create failed: ret %d\n", 2767 ath10k_warn("failed to create WMI vdev %i: %d\n",
2549 arvif->vdev_id, ret); 2768 arvif->vdev_id, ret);
2550 goto err; 2769 goto err;
2551 } 2770 }
@@ -2557,7 +2776,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2557 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, 2776 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
2558 arvif->def_wep_key_idx); 2777 arvif->def_wep_key_idx);
2559 if (ret) { 2778 if (ret) {
2560 ath10k_warn("Failed to set vdev %i default keyid: %d\n", 2779 ath10k_warn("failed to set vdev %i default key id: %d\n",
2561 arvif->vdev_id, ret); 2780 arvif->vdev_id, ret);
2562 goto err_vdev_delete; 2781 goto err_vdev_delete;
2563 } 2782 }
@@ -2567,7 +2786,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2567 ATH10K_HW_TXRX_NATIVE_WIFI); 2786 ATH10K_HW_TXRX_NATIVE_WIFI);
2568 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 2787 /* 10.X firmware does not support this VDEV parameter. Do not warn */
2569 if (ret && ret != -EOPNOTSUPP) { 2788 if (ret && ret != -EOPNOTSUPP) {
2570 ath10k_warn("Failed to set vdev %i TX encap: %d\n", 2789 ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
2571 arvif->vdev_id, ret); 2790 arvif->vdev_id, ret);
2572 goto err_vdev_delete; 2791 goto err_vdev_delete;
2573 } 2792 }
@@ -2575,14 +2794,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2575 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2794 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2576 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); 2795 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
2577 if (ret) { 2796 if (ret) {
2578 ath10k_warn("Failed to create vdev %i peer for AP: %d\n", 2797 ath10k_warn("failed to create vdev %i peer for AP: %d\n",
2579 arvif->vdev_id, ret); 2798 arvif->vdev_id, ret);
2580 goto err_vdev_delete; 2799 goto err_vdev_delete;
2581 } 2800 }
2582 2801
2583 ret = ath10k_mac_set_kickout(arvif); 2802 ret = ath10k_mac_set_kickout(arvif);
2584 if (ret) { 2803 if (ret) {
2585 ath10k_warn("Failed to set vdev %i kickout parameters: %d\n", 2804 ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
2586 arvif->vdev_id, ret); 2805 arvif->vdev_id, ret);
2587 goto err_peer_delete; 2806 goto err_peer_delete;
2588 } 2807 }
@@ -2594,7 +2813,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2594 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2813 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2595 param, value); 2814 param, value);
2596 if (ret) { 2815 if (ret) {
2597 ath10k_warn("Failed to set vdev %i RX wake policy: %d\n", 2816 ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
2598 arvif->vdev_id, ret); 2817 arvif->vdev_id, ret);
2599 goto err_peer_delete; 2818 goto err_peer_delete;
2600 } 2819 }
@@ -2604,7 +2823,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2604 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2823 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2605 param, value); 2824 param, value);
2606 if (ret) { 2825 if (ret) {
2607 ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n", 2826 ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
2608 arvif->vdev_id, ret); 2827 arvif->vdev_id, ret);
2609 goto err_peer_delete; 2828 goto err_peer_delete;
2610 } 2829 }
@@ -2614,7 +2833,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2614 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2833 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2615 param, value); 2834 param, value);
2616 if (ret) { 2835 if (ret) {
2617 ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n", 2836 ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
2618 arvif->vdev_id, ret); 2837 arvif->vdev_id, ret);
2619 goto err_peer_delete; 2838 goto err_peer_delete;
2620 } 2839 }
@@ -2622,21 +2841,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2622 2841
2623 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 2842 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
2624 if (ret) { 2843 if (ret) {
2625 ath10k_warn("failed to set rts threshold for vdev %d (%d)\n", 2844 ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
2626 arvif->vdev_id, ret); 2845 arvif->vdev_id, ret);
2627 goto err_peer_delete; 2846 goto err_peer_delete;
2628 } 2847 }
2629 2848
2630 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); 2849 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
2631 if (ret) { 2850 if (ret) {
2632 ath10k_warn("failed to set frag threshold for vdev %d (%d)\n", 2851 ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
2633 arvif->vdev_id, ret); 2852 arvif->vdev_id, ret);
2634 goto err_peer_delete; 2853 goto err_peer_delete;
2635 } 2854 }
2636 2855
2637 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2638 ar->monitor_present = true;
2639
2640 mutex_unlock(&ar->conf_mutex); 2856 mutex_unlock(&ar->conf_mutex);
2641 return 0; 2857 return 0;
2642 2858
@@ -2668,6 +2884,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2668 2884
2669 spin_lock_bh(&ar->data_lock); 2885 spin_lock_bh(&ar->data_lock);
2670 if (arvif->beacon) { 2886 if (arvif->beacon) {
2887 dma_unmap_single(arvif->ar->dev,
2888 ATH10K_SKB_CB(arvif->beacon)->paddr,
2889 arvif->beacon->len, DMA_TO_DEVICE);
2671 dev_kfree_skb_any(arvif->beacon); 2890 dev_kfree_skb_any(arvif->beacon);
2672 arvif->beacon = NULL; 2891 arvif->beacon = NULL;
2673 } 2892 }
@@ -2679,7 +2898,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2679 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2898 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2680 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); 2899 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
2681 if (ret) 2900 if (ret)
2682 ath10k_warn("Failed to remove peer for AP vdev %i: %d\n", 2901 ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
2683 arvif->vdev_id, ret); 2902 arvif->vdev_id, ret);
2684 2903
2685 kfree(arvif->u.ap.noa_data); 2904 kfree(arvif->u.ap.noa_data);
@@ -2690,12 +2909,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2690 2909
2691 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 2910 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
2692 if (ret) 2911 if (ret)
2693 ath10k_warn("WMI vdev %i delete failed: %d\n", 2912 ath10k_warn("failed to delete WMI vdev %i: %d\n",
2694 arvif->vdev_id, ret); 2913 arvif->vdev_id, ret);
2695 2914
2696 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2697 ar->monitor_present = false;
2698
2699 ath10k_peer_cleanup(ar, arvif->vdev_id); 2915 ath10k_peer_cleanup(ar, arvif->vdev_id);
2700 2916
2701 mutex_unlock(&ar->conf_mutex); 2917 mutex_unlock(&ar->conf_mutex);
@@ -2728,28 +2944,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
2728 *total_flags &= SUPPORTED_FILTERS; 2944 *total_flags &= SUPPORTED_FILTERS;
2729 ar->filter_flags = *total_flags; 2945 ar->filter_flags = *total_flags;
2730 2946
2731 /* Monitor must not be started if it wasn't created first. 2947 if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
2732 * Promiscuous mode may be started on a non-monitor interface - in 2948 ar->promisc = true;
2733 * such case the monitor vdev is not created so starting the 2949 ret = ath10k_monitor_start(ar);
2734 * monitor makes no sense. Since ath10k uses no special RX filters 2950 if (ret) {
2735 * (only BSS filter in STA mode) there's no need for any special 2951 ath10k_warn("failed to start monitor (promisc): %d\n",
2736 * action here. */ 2952 ret);
2737 if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && 2953 ar->promisc = false;
2738 !ar->monitor_enabled && ar->monitor_present) { 2954 }
2739 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n", 2955 } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
2740 ar->monitor_vdev_id); 2956 ar->promisc = false;
2741 2957 ath10k_monitor_stop(ar);
2742 ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
2743 if (ret)
2744 ath10k_warn("Unable to start monitor mode\n");
2745 } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
2746 ar->monitor_enabled && ar->monitor_present) {
2747 ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
2748 ar->monitor_vdev_id);
2749
2750 ret = ath10k_monitor_stop(ar);
2751 if (ret)
2752 ath10k_warn("Unable to stop monitor mode\n");
2753 } 2958 }
2754 2959
2755 mutex_unlock(&ar->conf_mutex); 2960 mutex_unlock(&ar->conf_mutex);
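
The promiscuous handling above sets ar->promisc first, tries to start the monitor, and clears the flag again on failure, so the cached state never claims a mode the hardware did not enter. A small stand-alone sketch of that set-then-roll-back pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static bool promisc;

static int monitor_start(void) { return -1; } /* pretend the start fails */

static void set_promisc(bool want)
{
	if (want && !promisc) {
		promisc = true;
		if (monitor_start()) {
			fprintf(stderr, "monitor start failed\n");
			promisc = false;   /* roll the cached state back */
		}
	} else if (!want && promisc) {
		promisc = false;
		/* monitor_stop() would go here */
	}
}

int main(void)
{
	set_promisc(true);
	printf("promisc=%d\n", promisc);
	return 0;
}
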
@@ -2780,7 +2985,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2780 arvif->vdev_id, arvif->beacon_interval); 2985 arvif->vdev_id, arvif->beacon_interval);
2781 2986
2782 if (ret) 2987 if (ret)
2783 ath10k_warn("Failed to set beacon interval for vdev %d: %i\n", 2988 ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
2784 arvif->vdev_id, ret); 2989 arvif->vdev_id, ret);
2785 } 2990 }
2786 2991
@@ -2793,7 +2998,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2793 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 2998 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
2794 WMI_BEACON_STAGGERED_MODE); 2999 WMI_BEACON_STAGGERED_MODE);
2795 if (ret) 3000 if (ret)
2796 ath10k_warn("Failed to set beacon mode for vdev %d: %i\n", 3001 ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
2797 arvif->vdev_id, ret); 3002 arvif->vdev_id, ret);
2798 } 3003 }
2799 3004
@@ -2808,7 +3013,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2808 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3013 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2809 arvif->dtim_period); 3014 arvif->dtim_period);
2810 if (ret) 3015 if (ret)
2811 ath10k_warn("Failed to set dtim period for vdev %d: %i\n", 3016 ath10k_warn("failed to set dtim period for vdev %d: %i\n",
2812 arvif->vdev_id, ret); 3017 arvif->vdev_id, ret);
2813 } 3018 }
2814 3019
@@ -2820,7 +3025,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2820 arvif->u.ap.hidden_ssid = info->hidden_ssid; 3025 arvif->u.ap.hidden_ssid = info->hidden_ssid;
2821 } 3026 }
2822 3027
2823 if (changed & BSS_CHANGED_BSSID) { 3028 /*
 3029 * Firmware manages the AP self-peer internally, so make sure not to create
 3030 * it in the driver. Otherwise AP self-peer deletion may time out later.
3031 */
3032 if (changed & BSS_CHANGED_BSSID &&
3033 vif->type != NL80211_IFTYPE_AP) {
2824 if (!is_zero_ether_addr(info->bssid)) { 3034 if (!is_zero_ether_addr(info->bssid)) {
2825 ath10k_dbg(ATH10K_DBG_MAC, 3035 ath10k_dbg(ATH10K_DBG_MAC,
2826 "mac vdev %d create peer %pM\n", 3036 "mac vdev %d create peer %pM\n",
@@ -2829,7 +3039,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2829 ret = ath10k_peer_create(ar, arvif->vdev_id, 3039 ret = ath10k_peer_create(ar, arvif->vdev_id,
2830 info->bssid); 3040 info->bssid);
2831 if (ret) 3041 if (ret)
2832 ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n", 3042 ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
2833 info->bssid, arvif->vdev_id, ret); 3043 info->bssid, arvif->vdev_id, ret);
2834 3044
2835 if (vif->type == NL80211_IFTYPE_STATION) { 3045 if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2868,20 +3078,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2868 ath10k_control_beaconing(arvif, info); 3078 ath10k_control_beaconing(arvif, info);
2869 3079
2870 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 3080 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2871 u32 cts_prot; 3081 arvif->use_cts_prot = info->use_cts_prot;
2872 if (info->use_cts_prot)
2873 cts_prot = 1;
2874 else
2875 cts_prot = 0;
2876
2877 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", 3082 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
2878 arvif->vdev_id, cts_prot); 3083 arvif->vdev_id, info->use_cts_prot);
2879 3084
2880 vdev_param = ar->wmi.vdev_param->enable_rtscts; 3085 ret = ath10k_recalc_rtscts_prot(arvif);
2881 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2882 cts_prot);
2883 if (ret) 3086 if (ret)
2884 ath10k_warn("Failed to set CTS prot for vdev %d: %d\n", 3087 ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
2885 arvif->vdev_id, ret); 3088 arvif->vdev_id, ret);
2886 } 3089 }
2887 3090
@@ -2900,7 +3103,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2900 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3103 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2901 slottime); 3104 slottime);
2902 if (ret) 3105 if (ret)
2903 ath10k_warn("Failed to set erp slot for vdev %d: %i\n", 3106 ath10k_warn("failed to set erp slot for vdev %d: %i\n",
2904 arvif->vdev_id, ret); 3107 arvif->vdev_id, ret);
2905 } 3108 }
2906 3109
@@ -2919,7 +3122,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2919 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3122 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2920 preamble); 3123 preamble);
2921 if (ret) 3124 if (ret)
2922 ath10k_warn("Failed to set preamble for vdev %d: %i\n", 3125 ath10k_warn("failed to set preamble for vdev %d: %i\n",
2923 arvif->vdev_id, ret); 3126 arvif->vdev_id, ret);
2924 } 3127 }
2925 3128
@@ -2990,7 +3193,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
2990 3193
2991 ret = ath10k_start_scan(ar, &arg); 3194 ret = ath10k_start_scan(ar, &arg);
2992 if (ret) { 3195 if (ret) {
2993 ath10k_warn("could not start hw scan (%d)\n", ret); 3196 ath10k_warn("failed to start hw scan: %d\n", ret);
2994 spin_lock_bh(&ar->data_lock); 3197 spin_lock_bh(&ar->data_lock);
2995 ar->scan.in_progress = false; 3198 ar->scan.in_progress = false;
2996 spin_unlock_bh(&ar->data_lock); 3199 spin_unlock_bh(&ar->data_lock);
@@ -3010,8 +3213,7 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
3010 mutex_lock(&ar->conf_mutex); 3213 mutex_lock(&ar->conf_mutex);
3011 ret = ath10k_abort_scan(ar); 3214 ret = ath10k_abort_scan(ar);
3012 if (ret) { 3215 if (ret) {
3013 ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n", 3216 ath10k_warn("failed to abort scan: %d\n", ret);
3014 ret);
3015 ieee80211_scan_completed(hw, 1 /* aborted */); 3217 ieee80211_scan_completed(hw, 1 /* aborted */);
3016 } 3218 }
3017 mutex_unlock(&ar->conf_mutex); 3219 mutex_unlock(&ar->conf_mutex);
@@ -3089,7 +3291,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3089 3291
3090 if (!peer) { 3292 if (!peer) {
3091 if (cmd == SET_KEY) { 3293 if (cmd == SET_KEY) {
3092 ath10k_warn("cannot install key for non-existent peer %pM\n", 3294 ath10k_warn("failed to install key for non-existent peer %pM\n",
3093 peer_addr); 3295 peer_addr);
3094 ret = -EOPNOTSUPP; 3296 ret = -EOPNOTSUPP;
3095 goto exit; 3297 goto exit;
@@ -3112,7 +3314,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3112 3314
3113 ret = ath10k_install_key(arvif, key, cmd, peer_addr); 3315 ret = ath10k_install_key(arvif, key, cmd, peer_addr);
3114 if (ret) { 3316 if (ret) {
3115 ath10k_warn("key installation failed for vdev %i peer %pM: %d\n", 3317 ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
3116 arvif->vdev_id, peer_addr, ret); 3318 arvif->vdev_id, peer_addr, ret);
3117 goto exit; 3319 goto exit;
3118 } 3320 }
@@ -3127,7 +3329,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3127 peer->keys[key->keyidx] = NULL; 3329 peer->keys[key->keyidx] = NULL;
3128 else if (peer == NULL) 3330 else if (peer == NULL)
3129 /* impossible unless FW goes crazy */ 3331 /* impossible unless FW goes crazy */
3130 ath10k_warn("peer %pM disappeared!\n", peer_addr); 3332 ath10k_warn("Peer %pM disappeared!\n", peer_addr);
3131 spin_unlock_bh(&ar->data_lock); 3333 spin_unlock_bh(&ar->data_lock);
3132 3334
3133exit: 3335exit:
@@ -3195,6 +3397,16 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
3195 sta->addr, smps, err); 3397 sta->addr, smps, err);
3196 } 3398 }
3197 3399
3400 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
3401 ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
3402 sta->addr);
3403
3404 err = ath10k_station_assoc(ar, arvif, sta, true);
3405 if (err)
3406 ath10k_warn("failed to reassociate station: %pM\n",
3407 sta->addr);
3408 }
3409
3198 mutex_unlock(&ar->conf_mutex); 3410 mutex_unlock(&ar->conf_mutex);
3199} 3411}
3200 3412
@@ -3236,7 +3448,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3236 max_num_peers = TARGET_NUM_PEERS; 3448 max_num_peers = TARGET_NUM_PEERS;
3237 3449
3238 if (ar->num_peers >= max_num_peers) { 3450 if (ar->num_peers >= max_num_peers) {
3239 ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n", 3451 ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
3240 ar->num_peers, max_num_peers); 3452 ar->num_peers, max_num_peers);
3241 ret = -ENOBUFS; 3453 ret = -ENOBUFS;
3242 goto exit; 3454 goto exit;
@@ -3248,7 +3460,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3248 3460
3249 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); 3461 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
3250 if (ret) 3462 if (ret)
3251 ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n", 3463 ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
3252 sta->addr, arvif->vdev_id, ret); 3464 sta->addr, arvif->vdev_id, ret);
3253 } else if ((old_state == IEEE80211_STA_NONE && 3465 } else if ((old_state == IEEE80211_STA_NONE &&
3254 new_state == IEEE80211_STA_NOTEXIST)) { 3466 new_state == IEEE80211_STA_NOTEXIST)) {
@@ -3260,7 +3472,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3260 arvif->vdev_id, sta->addr); 3472 arvif->vdev_id, sta->addr);
3261 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 3473 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
3262 if (ret) 3474 if (ret)
3263 ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n", 3475 ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
3264 sta->addr, arvif->vdev_id, ret); 3476 sta->addr, arvif->vdev_id, ret);
3265 3477
3266 if (vif->type == NL80211_IFTYPE_STATION) 3478 if (vif->type == NL80211_IFTYPE_STATION)
@@ -3275,9 +3487,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3275 ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n", 3487 ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
3276 sta->addr); 3488 sta->addr);
3277 3489
3278 ret = ath10k_station_assoc(ar, arvif, sta); 3490 ret = ath10k_station_assoc(ar, arvif, sta, false);
3279 if (ret) 3491 if (ret)
3280 ath10k_warn("Failed to associate station %pM for vdev %i: %i\n", 3492 ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
3281 sta->addr, arvif->vdev_id, ret); 3493 sta->addr, arvif->vdev_id, ret);
3282 } else if (old_state == IEEE80211_STA_ASSOC && 3494 } else if (old_state == IEEE80211_STA_ASSOC &&
3283 new_state == IEEE80211_STA_AUTH && 3495 new_state == IEEE80211_STA_AUTH &&
@@ -3291,7 +3503,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3291 3503
3292 ret = ath10k_station_disassoc(ar, arvif, sta); 3504 ret = ath10k_station_disassoc(ar, arvif, sta);
3293 if (ret) 3505 if (ret)
3294 ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n", 3506 ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
3295 sta->addr, arvif->vdev_id, ret); 3507 sta->addr, arvif->vdev_id, ret);
3296 } 3508 }
3297exit: 3509exit:
@@ -3339,7 +3551,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
3339 WMI_STA_PS_PARAM_UAPSD, 3551 WMI_STA_PS_PARAM_UAPSD,
3340 arvif->u.sta.uapsd); 3552 arvif->u.sta.uapsd);
3341 if (ret) { 3553 if (ret) {
3342 ath10k_warn("could not set uapsd params %d\n", ret); 3554 ath10k_warn("failed to set uapsd params: %d\n", ret);
3343 goto exit; 3555 goto exit;
3344 } 3556 }
3345 3557
@@ -3352,7 +3564,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
3352 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 3564 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
3353 value); 3565 value);
3354 if (ret) 3566 if (ret)
3355 ath10k_warn("could not set rx wake param %d\n", ret); 3567 ath10k_warn("failed to set rx wake param: %d\n", ret);
3356 3568
3357exit: 3569exit:
3358 return ret; 3570 return ret;
@@ -3402,13 +3614,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
3402 /* FIXME: FW accepts wmm params per hw, not per vif */ 3614 /* FIXME: FW accepts wmm params per hw, not per vif */
3403 ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); 3615 ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
3404 if (ret) { 3616 if (ret) {
3405 ath10k_warn("could not set wmm params %d\n", ret); 3617 ath10k_warn("failed to set wmm params: %d\n", ret);
3406 goto exit; 3618 goto exit;
3407 } 3619 }
3408 3620
3409 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 3621 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
3410 if (ret) 3622 if (ret)
3411 ath10k_warn("could not set sta uapsd %d\n", ret); 3623 ath10k_warn("failed to set sta uapsd: %d\n", ret);
3412 3624
3413exit: 3625exit:
3414 mutex_unlock(&ar->conf_mutex); 3626 mutex_unlock(&ar->conf_mutex);
@@ -3461,7 +3673,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
3461 3673
3462 ret = ath10k_start_scan(ar, &arg); 3674 ret = ath10k_start_scan(ar, &arg);
3463 if (ret) { 3675 if (ret) {
3464 ath10k_warn("could not start roc scan (%d)\n", ret); 3676 ath10k_warn("failed to start roc scan: %d\n", ret);
3465 spin_lock_bh(&ar->data_lock); 3677 spin_lock_bh(&ar->data_lock);
3466 ar->scan.in_progress = false; 3678 ar->scan.in_progress = false;
3467 spin_unlock_bh(&ar->data_lock); 3679 spin_unlock_bh(&ar->data_lock);
@@ -3470,7 +3682,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
3470 3682
3471 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); 3683 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
3472 if (ret == 0) { 3684 if (ret == 0) {
3473 ath10k_warn("could not switch to channel for roc scan\n"); 3685 ath10k_warn("failed to switch to channel for roc scan\n");
3474 ath10k_abort_scan(ar); 3686 ath10k_abort_scan(ar);
3475 ret = -ETIMEDOUT; 3687 ret = -ETIMEDOUT;
3476 goto exit; 3688 goto exit;
@@ -3511,7 +3723,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3511 3723
3512 ret = ath10k_mac_set_rts(arvif, value); 3724 ret = ath10k_mac_set_rts(arvif, value);
3513 if (ret) { 3725 if (ret) {
3514 ath10k_warn("could not set rts threshold for vdev %d (%d)\n", 3726 ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
3515 arvif->vdev_id, ret); 3727 arvif->vdev_id, ret);
3516 break; 3728 break;
3517 } 3729 }
@@ -3534,7 +3746,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3534 3746
3535 ret = ath10k_mac_set_rts(arvif, value); 3747 ret = ath10k_mac_set_rts(arvif, value);
3536 if (ret) { 3748 if (ret) {
3537 ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n", 3749 ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
3538 arvif->vdev_id, ret); 3750 arvif->vdev_id, ret);
3539 break; 3751 break;
3540 } 3752 }
@@ -3544,7 +3756,8 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3544 return ret; 3756 return ret;
3545} 3757}
3546 3758
3547static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 3759static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3760 u32 queues, bool drop)
3548{ 3761{
3549 struct ath10k *ar = hw->priv; 3762 struct ath10k *ar = hw->priv;
3550 bool skip; 3763 bool skip;
@@ -3573,7 +3786,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
3573 }), ATH10K_FLUSH_TIMEOUT_HZ); 3786 }), ATH10K_FLUSH_TIMEOUT_HZ);
3574 3787
3575 if (ret <= 0 || skip) 3788 if (ret <= 0 || skip)
3576 ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n", 3789 ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
3577 skip, ar->state, ret); 3790 skip, ar->state, ret);
3578 3791
3579skip: 3792skip:
@@ -3608,7 +3821,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
3608 3821
3609 ret = ath10k_hif_suspend(ar); 3822 ret = ath10k_hif_suspend(ar);
3610 if (ret) { 3823 if (ret) {
3611 ath10k_warn("could not suspend hif (%d)\n", ret); 3824 ath10k_warn("failed to suspend hif: %d\n", ret);
3612 goto resume; 3825 goto resume;
3613 } 3826 }
3614 3827
@@ -3617,7 +3830,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
3617resume: 3830resume:
3618 ret = ath10k_wmi_pdev_resume_target(ar); 3831 ret = ath10k_wmi_pdev_resume_target(ar);
3619 if (ret) 3832 if (ret)
3620 ath10k_warn("could not resume target (%d)\n", ret); 3833 ath10k_warn("failed to resume target: %d\n", ret);
3621 3834
3622 ret = 1; 3835 ret = 1;
3623exit: 3836exit:
@@ -3634,14 +3847,14 @@ static int ath10k_resume(struct ieee80211_hw *hw)
3634 3847
3635 ret = ath10k_hif_resume(ar); 3848 ret = ath10k_hif_resume(ar);
3636 if (ret) { 3849 if (ret) {
3637 ath10k_warn("could not resume hif (%d)\n", ret); 3850 ath10k_warn("failed to resume hif: %d\n", ret);
3638 ret = 1; 3851 ret = 1;
3639 goto exit; 3852 goto exit;
3640 } 3853 }
3641 3854
3642 ret = ath10k_wmi_pdev_resume_target(ar); 3855 ret = ath10k_wmi_pdev_resume_target(ar);
3643 if (ret) { 3856 if (ret) {
3644 ath10k_warn("could not resume target (%d)\n", ret); 3857 ath10k_warn("failed to resume target: %d\n", ret);
3645 ret = 1; 3858 ret = 1;
3646 goto exit; 3859 goto exit;
3647 } 3860 }
@@ -3964,7 +4177,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3964 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4177 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
3965 vdev_param, fixed_rate); 4178 vdev_param, fixed_rate);
3966 if (ret) { 4179 if (ret) {
3967 ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n", 4180 ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
3968 fixed_rate, ret); 4181 fixed_rate, ret);
3969 ret = -EINVAL; 4182 ret = -EINVAL;
3970 goto exit; 4183 goto exit;
@@ -3977,7 +4190,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3977 vdev_param, fixed_nss); 4190 vdev_param, fixed_nss);
3978 4191
3979 if (ret) { 4192 if (ret) {
3980 ath10k_warn("Could not set fixed_nss param %d: %d\n", 4193 ath10k_warn("failed to set fixed nss param %d: %d\n",
3981 fixed_nss, ret); 4194 fixed_nss, ret);
3982 ret = -EINVAL; 4195 ret = -EINVAL;
3983 goto exit; 4196 goto exit;
@@ -3990,7 +4203,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3990 force_sgi); 4203 force_sgi);
3991 4204
3992 if (ret) { 4205 if (ret) {
3993 ath10k_warn("Could not set sgi param %d: %d\n", 4206 ath10k_warn("failed to set sgi param %d: %d\n",
3994 force_sgi, ret); 4207 force_sgi, ret);
3995 ret = -EINVAL; 4208 ret = -EINVAL;
3996 goto exit; 4209 goto exit;
@@ -4026,7 +4239,7 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
4026 } 4239 }
4027 4240
4028 if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) { 4241 if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
4029 ath10k_warn("Could not force SGI usage for default rate settings\n"); 4242 ath10k_warn("failed to force SGI usage for default rate settings\n");
4030 return -EINVAL; 4243 return -EINVAL;
4031 } 4244 }
4032 4245
@@ -4034,14 +4247,6 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
4034 fixed_nss, force_sgi); 4247 fixed_nss, force_sgi);
4035} 4248}
4036 4249
4037static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
4038 struct ieee80211_vif *vif,
4039 struct cfg80211_chan_def *chandef)
4040{
4041 /* there's no need to do anything here. vif->csa_active is enough */
4042 return;
4043}
4044
4045static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 4250static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4046 struct ieee80211_vif *vif, 4251 struct ieee80211_vif *vif,
4047 struct ieee80211_sta *sta, 4252 struct ieee80211_sta *sta,
@@ -4072,8 +4277,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4072 bw = WMI_PEER_CHWIDTH_80MHZ; 4277 bw = WMI_PEER_CHWIDTH_80MHZ;
4073 break; 4278 break;
4074 case IEEE80211_STA_RX_BW_160: 4279 case IEEE80211_STA_RX_BW_160:
 4075 ath10k_warn("mac sta rc update for %pM: invalid bw %d\n", 4280 ath10k_warn("Invalid bandwidth %d in rc update for %pM\n",
4076 sta->addr, sta->bandwidth); 4281 sta->bandwidth, sta->addr);
4077 bw = WMI_PEER_CHWIDTH_20MHZ; 4282 bw = WMI_PEER_CHWIDTH_20MHZ;
4078 break; 4283 break;
4079 } 4284 }
@@ -4099,8 +4304,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4099 smps = WMI_PEER_SMPS_DYNAMIC; 4304 smps = WMI_PEER_SMPS_DYNAMIC;
4100 break; 4305 break;
4101 case IEEE80211_SMPS_NUM_MODES: 4306 case IEEE80211_SMPS_NUM_MODES:
4102 ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n", 4307 ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
4103 sta->addr, sta->smps_mode); 4308 sta->smps_mode, sta->addr);
4104 smps = WMI_PEER_SMPS_PS_NONE; 4309 smps = WMI_PEER_SMPS_PS_NONE;
4105 break; 4310 break;
4106 } 4311 }
@@ -4108,15 +4313,6 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4108 arsta->smps = smps; 4313 arsta->smps = smps;
4109 } 4314 }
4110 4315
4111 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
4112 /* FIXME: Not implemented. Probably the only way to do it would
4113 * be to re-assoc the peer. */
4114 changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
4115 ath10k_dbg(ATH10K_DBG_MAC,
4116 "mac sta rc update for %pM: changing supported rates not implemented\n",
4117 sta->addr);
4118 }
4119
4120 arsta->changed |= changed; 4316 arsta->changed |= changed;
4121 4317
4122 spin_unlock_bh(&ar->data_lock); 4318 spin_unlock_bh(&ar->data_lock);
@@ -4154,10 +4350,11 @@ static const struct ieee80211_ops ath10k_ops = {
4154 .set_frag_threshold = ath10k_set_frag_threshold, 4350 .set_frag_threshold = ath10k_set_frag_threshold,
4155 .flush = ath10k_flush, 4351 .flush = ath10k_flush,
4156 .tx_last_beacon = ath10k_tx_last_beacon, 4352 .tx_last_beacon = ath10k_tx_last_beacon,
4353 .set_antenna = ath10k_set_antenna,
4354 .get_antenna = ath10k_get_antenna,
4157 .restart_complete = ath10k_restart_complete, 4355 .restart_complete = ath10k_restart_complete,
4158 .get_survey = ath10k_get_survey, 4356 .get_survey = ath10k_get_survey,
4159 .set_bitrate_mask = ath10k_set_bitrate_mask, 4357 .set_bitrate_mask = ath10k_set_bitrate_mask,
4160 .channel_switch_beacon = ath10k_channel_switch_beacon,
4161 .sta_rc_update = ath10k_sta_rc_update, 4358 .sta_rc_update = ath10k_sta_rc_update,
4162 .get_tsf = ath10k_get_tsf, 4359 .get_tsf = ath10k_get_tsf,
4163#ifdef CONFIG_PM 4360#ifdef CONFIG_PM
@@ -4503,6 +4700,18 @@ int ath10k_mac_register(struct ath10k *ar)
4503 BIT(NL80211_IFTYPE_ADHOC) | 4700 BIT(NL80211_IFTYPE_ADHOC) |
4504 BIT(NL80211_IFTYPE_AP); 4701 BIT(NL80211_IFTYPE_AP);
4505 4702
4703 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
 4704 /* TODO: Have to deal with 2x2 chips if/when they come out. */
4705 ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
4706 ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
4707 } else {
4708 ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
4709 ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
4710 }
4711
4712 ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
4713 ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
4714
4506 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) 4715 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
4507 ar->hw->wiphy->interface_modes |= 4716 ar->hw->wiphy->interface_modes |=
4508 BIT(NL80211_IFTYPE_P2P_CLIENT) | 4717 BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4516,7 +4725,6 @@ int ath10k_mac_register(struct ath10k *ar)
4516 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 4725 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4517 IEEE80211_HW_HAS_RATE_CONTROL | 4726 IEEE80211_HW_HAS_RATE_CONTROL |
4518 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 4727 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
4519 IEEE80211_HW_WANT_MONITOR_VIF |
4520 IEEE80211_HW_AP_LINK_PS | 4728 IEEE80211_HW_AP_LINK_PS |
4521 IEEE80211_HW_SPECTRUM_MGMT; 4729 IEEE80211_HW_SPECTRUM_MGMT;
4522 4730
@@ -4570,19 +4778,19 @@ int ath10k_mac_register(struct ath10k *ar)
4570 NL80211_DFS_UNSET); 4778 NL80211_DFS_UNSET);
4571 4779
4572 if (!ar->dfs_detector) 4780 if (!ar->dfs_detector)
4573 ath10k_warn("dfs pattern detector init failed\n"); 4781 ath10k_warn("failed to initialise DFS pattern detector\n");
4574 } 4782 }
4575 4783
4576 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, 4784 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
4577 ath10k_reg_notifier); 4785 ath10k_reg_notifier);
4578 if (ret) { 4786 if (ret) {
4579 ath10k_err("Regulatory initialization failed: %i\n", ret); 4787 ath10k_err("failed to initialise regulatory: %i\n", ret);
4580 goto err_free; 4788 goto err_free;
4581 } 4789 }
4582 4790
4583 ret = ieee80211_register_hw(ar->hw); 4791 ret = ieee80211_register_hw(ar->hw);
4584 if (ret) { 4792 if (ret) {
4585 ath10k_err("ieee80211 registration failed: %d\n", ret); 4793 ath10k_err("failed to register ieee80211: %d\n", ret);
4586 goto err_free; 4794 goto err_free;
4587 } 4795 }
4588 4796
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9d242d801d9d..d0004d59c97e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -39,15 +39,28 @@ enum ath10k_pci_irq_mode {
39 ATH10K_PCI_IRQ_MSI = 2, 39 ATH10K_PCI_IRQ_MSI = 2,
40}; 40};
41 41
42static unsigned int ath10k_target_ps; 42enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
45};
46
47static unsigned int ath10k_pci_target_ps;
43static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; 48static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
49static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
44 50
45module_param(ath10k_target_ps, uint, 0644); 51module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
46MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); 52MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
47 53
48module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); 54module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
49MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); 55MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
50 56
57module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
58MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
59
60/* how long to wait for the target to initialise, in ms */
61#define ATH10K_PCI_TARGET_WAIT 3000
62#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
63
51#define QCA988X_2_0_DEVICE_ID (0x003c) 64#define QCA988X_2_0_DEVICE_ID (0x003c)
52 65
53static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { 66static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
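
module_param_named(), as used above, splits the user-visible parameter name ("target_ps") from the C identifier (ath10k_pci_target_ps), which is how the variable could gain the pci_ prefix without breaking existing modprobe options. A minimal sketch of the pattern with a hypothetical parameter:

#include <linux/module.h>

static unsigned int demo_reset_mode; /* 0: auto, 1: warm only */

/* exposed as /sys/module/<name>/parameters/reset_mode */
module_param_named(reset_mode, demo_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

MODULE_LICENSE("GPL");
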
@@ -346,9 +359,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
346 * 2) Buffer in DMA-able space 359 * 2) Buffer in DMA-able space
347 */ 360 */
348 orig_nbytes = nbytes; 361 orig_nbytes = nbytes;
349 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, 362 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
350 orig_nbytes, 363 orig_nbytes,
351 &ce_data_base); 364 &ce_data_base,
365 GFP_ATOMIC);
352 366
353 if (!data_buf) { 367 if (!data_buf) {
354 ret = -ENOMEM; 368 ret = -ENOMEM;
@@ -442,12 +456,12 @@ done:
442 __le32_to_cpu(((__le32 *)data_buf)[i]); 456 __le32_to_cpu(((__le32 *)data_buf)[i]);
443 } 457 }
444 } else 458 } else
445 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", 459 ath10k_warn("failed to read diag value at 0x%x: %d\n",
446 __func__, address); 460 address, ret);
447 461
448 if (data_buf) 462 if (data_buf)
449 pci_free_consistent(ar_pci->pdev, orig_nbytes, 463 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
450 data_buf, ce_data_base); 464 ce_data_base);
451 465
452 return ret; 466 return ret;
453} 467}
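
pci_alloc_consistent() is a legacy PCI-only wrapper; dma_alloc_coherent(), which the hunk above switches to, works against a generic struct device and takes an explicit gfp_t, which is how the diag path can request GFP_ATOMIC. A minimal sketch of the allocate/use/free pairing, assuming a valid struct device pointer (illustrative, not the driver's actual helper):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_diag_buffer(struct device *dev, size_t nbytes)
{
	dma_addr_t dma_handle;
	void *buf;

	buf = dma_alloc_coherent(dev, nbytes, &dma_handle, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	/* the device sees dma_handle, the CPU reads/writes through buf */

	dma_free_coherent(dev, nbytes, buf, dma_handle);
	return 0;
}
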
@@ -490,9 +504,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
490 * 2) Buffer in DMA-able space 504 * 2) Buffer in DMA-able space
491 */ 505 */
492 orig_nbytes = nbytes; 506 orig_nbytes = nbytes;
493 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, 507 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
494 orig_nbytes, 508 orig_nbytes,
495 &ce_data_base); 509 &ce_data_base,
510 GFP_ATOMIC);
496 if (!data_buf) { 511 if (!data_buf) {
497 ret = -ENOMEM; 512 ret = -ENOMEM;
498 goto done; 513 goto done;
@@ -588,13 +603,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
588 603
589done: 604done:
590 if (data_buf) { 605 if (data_buf) {
591 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, 606 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
592 ce_data_base); 607 ce_data_base);
593 } 608 }
594 609
595 if (ret != 0) 610 if (ret != 0)
596 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__, 611 ath10k_warn("failed to write diag value at 0x%x: %d\n",
597 address); 612 address, ret);
598 613
599 return ret; 614 return ret;
600} 615}
@@ -747,17 +762,21 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
747 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 762 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
748 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 763 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
749 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 764 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
750 unsigned int nentries_mask = src_ring->nentries_mask; 765 unsigned int nentries_mask;
751 unsigned int sw_index = src_ring->sw_index; 766 unsigned int sw_index;
752 unsigned int write_index = src_ring->write_index; 767 unsigned int write_index;
753 int err, i; 768 int err, i = 0;
754 769
755 spin_lock_bh(&ar_pci->ce_lock); 770 spin_lock_bh(&ar_pci->ce_lock);
756 771
772 nentries_mask = src_ring->nentries_mask;
773 sw_index = src_ring->sw_index;
774 write_index = src_ring->write_index;
775
757 if (unlikely(CE_RING_DELTA(nentries_mask, 776 if (unlikely(CE_RING_DELTA(nentries_mask,
758 write_index, sw_index - 1) < n_items)) { 777 write_index, sw_index - 1) < n_items)) {
759 err = -ENOBUFS; 778 err = -ENOBUFS;
760 goto unlock; 779 goto err;
761 } 780 }
762 781
763 for (i = 0; i < n_items - 1; i++) { 782 for (i = 0; i < n_items - 1; i++) {
@@ -774,7 +793,7 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
774 items[i].transfer_id, 793 items[i].transfer_id,
775 CE_SEND_FLAG_GATHER); 794 CE_SEND_FLAG_GATHER);
776 if (err) 795 if (err)
777 goto unlock; 796 goto err;
778 } 797 }
779 798
780 /* `i` is equal to `n_items -1` after for() */ 799 /* `i` is equal to `n_items -1` after for() */
@@ -792,10 +811,15 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
792 items[i].transfer_id, 811 items[i].transfer_id,
793 0); 812 0);
794 if (err) 813 if (err)
795 goto unlock; 814 goto err;
815
816 spin_unlock_bh(&ar_pci->ce_lock);
817 return 0;
818
819err:
820 for (; i > 0; i--)
821 __ath10k_ce_send_revert(ce_pipe);
796 822
797 err = 0;
798unlock:
799 spin_unlock_bh(&ar_pci->ce_lock); 823 spin_unlock_bh(&ar_pci->ce_lock);
800 return err; 824 return err;
801} 825}
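
The tx_sg fix above matters because scatter-gather entries are posted to the ring one at a time: if entry k fails, entries 0..k-1 are already queued and must be reverted before reporting the error, otherwise the hardware sees a half-gathered frame. A stand-alone sketch of the revert loop, with a hypothetical queue standing in for the copy engine ring:

#include <stdio.h>

#define N_ITEMS 4

static int posted;

static int post_item(int i)
{
	if (i == 2)           /* simulate a failure on the third entry */
		return -1;
	posted++;
	return 0;
}

static void revert_item(void) { posted--; }

static int send_all(void)
{
	int i;

	for (i = 0; i < N_ITEMS; i++)
		if (post_item(i))
			goto err;
	return 0;

err:
	for (; i > 0; i--)    /* undo every entry already queued */
		revert_item();
	return -1;
}

int main(void)
{
	int ret = send_all();
	printf("ret=%d posted=%d\n", ret, posted);
	return 0;
}
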
@@ -803,6 +827,9 @@ unlock:
803static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 827static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
804{ 828{
805 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 829 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
830
831 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
832
806 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 833 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
807} 834}
808 835
@@ -854,6 +881,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
854static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 881static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
855 int force) 882 int force)
856{ 883{
884 ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
885
857 if (!force) { 886 if (!force) {
858 int resources; 887 int resources;
859 /* 888 /*
@@ -880,7 +909,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
880{ 909{
881 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 910 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
882 911
883 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 912 ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
884 913
885 memcpy(&ar_pci->msg_callbacks_current, callbacks, 914 memcpy(&ar_pci->msg_callbacks_current, callbacks,
886 sizeof(ar_pci->msg_callbacks_current)); 915 sizeof(ar_pci->msg_callbacks_current));
@@ -938,6 +967,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
938{ 967{
939 int ret = 0; 968 int ret = 0;
940 969
970 ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
971
941 /* polling for received messages not supported */ 972 /* polling for received messages not supported */
942 *dl_is_polled = 0; 973 *dl_is_polled = 0;
943 974
@@ -997,6 +1028,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
997{ 1028{
998 int ul_is_polled, dl_is_polled; 1029 int ul_is_polled, dl_is_polled;
999 1030
1031 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
1032
1000 (void)ath10k_pci_hif_map_service_to_pipe(ar, 1033 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1001 ATH10K_HTC_SVC_ID_RSVD_CTRL, 1034 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1002 ul_pipe, 1035 ul_pipe,
@@ -1098,6 +1131,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
1098 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1131 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1099 int ret, ret_early; 1132 int ret, ret_early;
1100 1133
1134 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1135
1101 ath10k_pci_free_early_irq(ar); 1136 ath10k_pci_free_early_irq(ar);
1102 ath10k_pci_kill_tasklet(ar); 1137 ath10k_pci_kill_tasklet(ar);
1103 1138
@@ -1233,18 +1268,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1233 1268
1234static void ath10k_pci_ce_deinit(struct ath10k *ar) 1269static void ath10k_pci_ce_deinit(struct ath10k *ar)
1235{ 1270{
1236 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1271 int i;
1237 struct ath10k_pci_pipe *pipe_info;
1238 int pipe_num;
1239 1272
1240 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 1273 for (i = 0; i < CE_COUNT; i++)
1241 pipe_info = &ar_pci->pipe_info[pipe_num]; 1274 ath10k_ce_deinit_pipe(ar, i);
1242 if (pipe_info->ce_hdl) {
1243 ath10k_ce_deinit(pipe_info->ce_hdl);
1244 pipe_info->ce_hdl = NULL;
1245 pipe_info->buf_sz = 0;
1246 }
1247 }
1248} 1275}
1249 1276
1250static void ath10k_pci_hif_stop(struct ath10k *ar) 1277static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1252,7 +1279,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1252 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1279 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1253 int ret; 1280 int ret;
1254 1281
1255 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 1282 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1283
1284 if (WARN_ON(!ar_pci->started))
1285 return;
1256 1286
1257 ret = ath10k_ce_disable_interrupts(ar); 1287 ret = ath10k_ce_disable_interrupts(ar);
1258 if (ret) 1288 if (ret)
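The new WARN_ON(!ar_pci->started) guard makes hif_stop a no-op (with a loud warning) when the start path never completed, instead of tearing down state that was never set up. A minimal userspace sketch of the same guard pattern, assuming an invented struct dev/dev_stop shape rather than the driver's real types:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev {
        bool started;
    };

    static void dev_stop(struct dev *d)
    {
        /* mirror of the WARN_ON(!ar_pci->started) check: stopping a
         * device that never started would tear down uninitialized state */
        if (!d->started) {
            fprintf(stderr, "warn: stop called before start\n");
            return;
        }
        /* ... disable interrupts, flush queues ... */
        d->started = false;
    }

    int main(void)
    {
        struct dev d = { .started = false };
        dev_stop(&d);          /* warns and bails out */
        d.started = true;
        dev_stop(&d);          /* performs the real teardown */
        return 0;
    }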
@@ -1697,30 +1727,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
1697 return 0; 1727 return 0;
1698} 1728}
1699 1729
1730static int ath10k_pci_alloc_ce(struct ath10k *ar)
1731{
1732 int i, ret;
1733
1734 for (i = 0; i < CE_COUNT; i++) {
1735 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1736 if (ret) {
1737 ath10k_err("failed to allocate copy engine pipe %d: %d\n",
1738 i, ret);
1739 return ret;
1740 }
1741 }
1700 1742
1743 return 0;
1744}
1745
1746static void ath10k_pci_free_ce(struct ath10k *ar)
1747{
1748 int i;
1749
1750 for (i = 0; i < CE_COUNT; i++)
1751 ath10k_ce_free_pipe(ar, i);
1752}
1701 1753
1702static int ath10k_pci_ce_init(struct ath10k *ar) 1754static int ath10k_pci_ce_init(struct ath10k *ar)
1703{ 1755{
1704 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1756 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1705 struct ath10k_pci_pipe *pipe_info; 1757 struct ath10k_pci_pipe *pipe_info;
1706 const struct ce_attr *attr; 1758 const struct ce_attr *attr;
1707 int pipe_num; 1759 int pipe_num, ret;
1708 1760
1709 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 1761 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1710 pipe_info = &ar_pci->pipe_info[pipe_num]; 1762 pipe_info = &ar_pci->pipe_info[pipe_num];
1763 pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1711 pipe_info->pipe_num = pipe_num; 1764 pipe_info->pipe_num = pipe_num;
1712 pipe_info->hif_ce_state = ar; 1765 pipe_info->hif_ce_state = ar;
1713 attr = &host_ce_config_wlan[pipe_num]; 1766 attr = &host_ce_config_wlan[pipe_num];
1714 1767
1715 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); 1768 ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
1716 if (pipe_info->ce_hdl == NULL) { 1769 if (ret) {
1717 ath10k_err("failed to initialize CE for pipe: %d\n", 1770 ath10k_err("failed to initialize copy engine pipe %d: %d\n",
1718 pipe_num); 1771 pipe_num, ret);
1719 1772 return ret;
1720 /* It is safe to call it here. It checks if ce_hdl is
1721 * valid for each pipe */
1722 ath10k_pci_ce_deinit(ar);
1723 return -1;
1724 } 1773 }
1725 1774
1726 if (pipe_num == CE_COUNT - 1) { 1775 if (pipe_num == CE_COUNT - 1) {
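This hunk splits copy-engine setup into a one-time allocation step (ath10k_pci_alloc_ce(), called from probe) and a re-runnable initialization step (ath10k_pci_ce_init()), so a later warm reset can re-init the pipes without reallocating them; that is also why the old deinit-on-failure dance inside the init loop disappears. A hedged sketch of that two-phase lifecycle in plain C — the pipe struct, count and ring size are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PIPE_COUNT 8

    struct pipe { int id; char *ring; };      /* stand-in for a CE pipe */
    static struct pipe pipes[PIPE_COUNT];

    /* probe time: allocate once */
    static int pipes_alloc(void)
    {
        for (int i = 0; i < PIPE_COUNT; i++) {
            pipes[i].ring = malloc(4096);
            if (!pipes[i].ring)
                return -1;
        }
        return 0;
    }

    /* power-up time: may run many times (e.g. after each warm reset) */
    static void pipes_init(void)
    {
        for (int i = 0; i < PIPE_COUNT; i++) {
            pipes[i].id = i;
            memset(pipes[i].ring, 0, 4096);   /* reset ring state, no malloc */
        }
    }

    static void pipes_free(void)
    {
        for (int i = 0; i < PIPE_COUNT; i++)
            free(pipes[i].ring);
    }

    int main(void)
    {
        if (pipes_alloc())
            return 1;
        pipes_init();   /* first boot */
        pipes_init();   /* re-init after a reset: cheap and infallible */
        pipes_free();
        return 0;
    }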
@@ -1741,16 +1790,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
1741static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) 1790static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1742{ 1791{
1743 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1792 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1744 u32 fw_indicator_address, fw_indicator; 1793 u32 fw_indicator;
1745 1794
1746 ath10k_pci_wake(ar); 1795 ath10k_pci_wake(ar);
1747 1796
1748 fw_indicator_address = ar_pci->fw_indicator_address; 1797 fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1749 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1750 1798
1751 if (fw_indicator & FW_IND_EVENT_PENDING) { 1799 if (fw_indicator & FW_IND_EVENT_PENDING) {
1752 /* ACK: clear Target-side pending event */ 1800 /* ACK: clear Target-side pending event */
1753 ath10k_pci_write32(ar, fw_indicator_address, 1801 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1754 fw_indicator & ~FW_IND_EVENT_PENDING); 1802 fw_indicator & ~FW_IND_EVENT_PENDING);
1755 1803
1756 if (ar_pci->started) { 1804 if (ar_pci->started) {
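With the cached fw_indicator_address gone, the handler reads the indicator straight through the FW_INDICATOR_ADDRESS constant and acknowledges a pending event by writing the value back with FW_IND_EVENT_PENDING cleared. A small self-contained sketch of that read/clear/write acknowledge, with the register simulated by a volatile variable:

    #include <stdint.h>
    #include <stdio.h>

    #define FW_IND_EVENT_PENDING (1u << 0)

    static volatile uint32_t fw_indicator;   /* stand-in for the MMIO register */

    static uint32_t reg_read(void)    { return fw_indicator; }
    static void reg_write(uint32_t v) { fw_indicator = v; }

    static void fw_irq_handler(void)
    {
        uint32_t v = reg_read();

        if (v & FW_IND_EVENT_PENDING) {
            /* ACK: clear only the pending bit, preserve the rest */
            reg_write(v & ~FW_IND_EVENT_PENDING);
            printf("firmware event acknowledged\n");
        }
    }

    int main(void)
    {
        fw_indicator = FW_IND_EVENT_PENDING | 0x100;
        fw_irq_handler();
        printf("indicator now 0x%x\n", (unsigned)fw_indicator);  /* 0x100 */
        return 0;
    }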
@@ -1767,13 +1815,32 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1767 ath10k_pci_sleep(ar); 1815 ath10k_pci_sleep(ar);
1768} 1816}
1769 1817
1818/* this function effectively clears target memory controller assert line */
1819static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1820{
1821 u32 val;
1822
1823 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1824 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1825 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1826 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1827
1828 msleep(10);
1829
1830 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1831 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1832 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1833 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1834
1835 msleep(10);
1836}
1837
1770static int ath10k_pci_warm_reset(struct ath10k *ar) 1838static int ath10k_pci_warm_reset(struct ath10k *ar)
1771{ 1839{
1772 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1773 int ret = 0; 1840 int ret = 0;
1774 u32 val; 1841 u32 val;
1775 1842
1776 ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n"); 1843 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1777 1844
1778 ret = ath10k_do_pci_wake(ar); 1845 ret = ath10k_do_pci_wake(ar);
1779 if (ret) { 1846 if (ret) {
@@ -1801,7 +1868,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
1801 msleep(100); 1868 msleep(100);
1802 1869
1803 /* clear fw indicator */ 1870 /* clear fw indicator */
1804 ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0); 1871 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1805 1872
1806 /* clear target LF timer interrupts */ 1873 /* clear target LF timer interrupts */
1807 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 1874 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
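ath10k_pci_warm_reset_si0() pulses the SI0 bit in the SOC reset-control register: set the bit, read the register back, wait, clear the bit, read back, wait; the read-backs mimic the driver's posted-write flush. A sketch of the same pulse sequence against a simulated register, with the 10 ms delays kept symbolic (the SI0_RST bit position is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define SI0_RST (1u << 3)

    static volatile uint32_t reset_ctrl;

    static void pulse_reset_bit(void)
    {
        uint32_t val;

        val = reset_ctrl;
        reset_ctrl = val | SI0_RST;   /* assert */
        (void)reset_ctrl;             /* read back: flush the posted write */
        usleep(10000);                /* msleep(10) in the driver */

        val = reset_ctrl;
        reset_ctrl = val & ~SI0_RST;  /* deassert */
        (void)reset_ctrl;
        usleep(10000);
    }

    int main(void)
    {
        pulse_reset_bit();
        printf("reset line pulsed, ctrl=0x%x\n", (unsigned)reset_ctrl);
        return 0;
    }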
@@ -1826,6 +1893,8 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
1826 SOC_RESET_CONTROL_ADDRESS); 1893 SOC_RESET_CONTROL_ADDRESS);
1827 msleep(10); 1894 msleep(10);
1828 1895
1896 ath10k_pci_warm_reset_si0(ar);
1897
1829 /* debug */ 1898 /* debug */
1830 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1899 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1831 PCIE_INTR_CAUSE_ADDRESS); 1900 PCIE_INTR_CAUSE_ADDRESS);
@@ -1934,7 +2003,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1934 irq_mode = "legacy"; 2003 irq_mode = "legacy";
1935 2004
1936 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) 2005 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1937 ath10k_info("pci irq %s\n", irq_mode); 2006 ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
2007 irq_mode, ath10k_pci_irq_mode,
2008 ath10k_pci_reset_mode);
1938 2009
1939 return 0; 2010 return 0;
1940 2011
@@ -1952,23 +2023,52 @@ err:
1952 return ret; 2023 return ret;
1953} 2024}
1954 2025
2026static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
2027{
2028 int i, ret;
2029
2030 /*
2031 * Sometimes warm reset succeeds only after retries.
2032 *
2033 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
2034 * on the first try.
2035 */
2036 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2037 ret = __ath10k_pci_hif_power_up(ar, false);
2038 if (ret == 0)
2039 break;
2040
2041 ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
2042 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
2043 }
2044
2045 return ret;
2046}
2047
1955static int ath10k_pci_hif_power_up(struct ath10k *ar) 2048static int ath10k_pci_hif_power_up(struct ath10k *ar)
1956{ 2049{
1957 int ret; 2050 int ret;
1958 2051
2052 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
2053
1959 /* 2054 /*
1960 * Hardware CUS232 version 2 has some issues with cold reset and the 2055 * Hardware CUS232 version 2 has some issues with cold reset and the
1961 * preferred (and safer) way to perform a device reset is through a 2056 * preferred (and safer) way to perform a device reset is through a
1962 * warm reset. 2057 * warm reset.
1963 * 2058 *
1964 * Warm reset doesn't always work though (notably after a firmware 2059 * Warm reset doesn't always work though, so falling back to cold
1965 * crash) so fall back to cold reset if necessary. 2060 * reset may be necessary.
1966 */ 2061 */
1967 ret = __ath10k_pci_hif_power_up(ar, false); 2062 ret = ath10k_pci_hif_power_up_warm(ar);
1968 if (ret) { 2063 if (ret) {
1969 ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n", 2064 ath10k_warn("failed to power up target using warm reset: %d\n",
1970 ret); 2065 ret);
1971 2066
2067 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
2068 return ret;
2069
2070 ath10k_warn("trying cold reset\n");
2071
1972 ret = __ath10k_pci_hif_power_up(ar, true); 2072 ret = __ath10k_pci_hif_power_up(ar, true);
1973 if (ret) { 2073 if (ret) {
1974 ath10k_err("failed to power up target using cold reset too (%d)\n", 2074 ath10k_err("failed to power up target using cold reset too (%d)\n",
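Power-up now retries the warm reset up to ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS times and only falls back to a cold reset if the reset_mode module parameter allows it. A userspace sketch of the retry-then-fallback control flow; the attempt count and the simulated third-try success are invented for the demo:

    #include <stdio.h>

    #define NUM_WARM_RESET_ATTEMPTS 3

    enum reset_mode { RESET_AUTO, RESET_WARM_ONLY };

    static int warm_reset_attempts;

    static int power_up_warm(void)
    {
        /* pretend warm reset only succeeds on the third try */
        return (++warm_reset_attempts >= 3) ? 0 : -1;
    }

    static int power_up_cold(void) { return 0; }

    static int power_up(enum reset_mode mode)
    {
        int ret = -1;

        for (int i = 0; i < NUM_WARM_RESET_ATTEMPTS; i++) {
            ret = power_up_warm();
            if (ret == 0)
                return 0;
            fprintf(stderr, "warm reset attempt %d of %d failed\n",
                    i + 1, NUM_WARM_RESET_ATTEMPTS);
        }

        if (mode == RESET_WARM_ONLY)
            return ret;            /* never cold-reset in this mode */

        fprintf(stderr, "falling back to cold reset\n");
        return power_up_cold();
    }

    int main(void)
    {
        return power_up(RESET_AUTO) ? 1 : 0;
    }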
@@ -1984,12 +2084,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
1984{ 2084{
1985 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2085 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1986 2086
2087 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2088
1987 ath10k_pci_free_early_irq(ar); 2089 ath10k_pci_free_early_irq(ar);
1988 ath10k_pci_kill_tasklet(ar); 2090 ath10k_pci_kill_tasklet(ar);
1989 ath10k_pci_deinit_irq(ar); 2091 ath10k_pci_deinit_irq(ar);
2092 ath10k_pci_ce_deinit(ar);
1990 ath10k_pci_warm_reset(ar); 2093 ath10k_pci_warm_reset(ar);
1991 2094
1992 ath10k_pci_ce_deinit(ar);
1993 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) 2095 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1994 ath10k_do_pci_sleep(ar); 2096 ath10k_do_pci_sleep(ar);
1995} 2097}
@@ -2137,7 +2239,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2137static void ath10k_pci_early_irq_tasklet(unsigned long data) 2239static void ath10k_pci_early_irq_tasklet(unsigned long data)
2138{ 2240{
2139 struct ath10k *ar = (struct ath10k *)data; 2241 struct ath10k *ar = (struct ath10k *)data;
2140 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2141 u32 fw_ind; 2242 u32 fw_ind;
2142 int ret; 2243 int ret;
2143 2244
@@ -2148,14 +2249,11 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
2148 return; 2249 return;
2149 } 2250 }
2150 2251
2151 fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address); 2252 fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2152 if (fw_ind & FW_IND_EVENT_PENDING) { 2253 if (fw_ind & FW_IND_EVENT_PENDING) {
2153 ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 2254 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2154 fw_ind & ~FW_IND_EVENT_PENDING); 2255 fw_ind & ~FW_IND_EVENT_PENDING);
2155 2256 ath10k_pci_hif_dump_area(ar);
2156 /* Some structures are unavailable during early boot or at
2157 * driver teardown so just print that the device has crashed. */
2158 ath10k_warn("device crashed - no diagnostics available\n");
2159 } 2257 }
2160 2258
2161 ath10k_pci_sleep(ar); 2259 ath10k_pci_sleep(ar);
@@ -2385,33 +2483,69 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
2385static int ath10k_pci_wait_for_target_init(struct ath10k *ar) 2483static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2386{ 2484{
2387 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2485 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2388 int wait_limit = 300; /* 3 sec */ 2486 unsigned long timeout;
2389 int ret; 2487 int ret;
2488 u32 val;
2489
2490 ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2390 2491
2391 ret = ath10k_pci_wake(ar); 2492 ret = ath10k_pci_wake(ar);
2392 if (ret) { 2493 if (ret) {
2393 ath10k_err("failed to wake up target: %d\n", ret); 2494 ath10k_err("failed to wake up target for init: %d\n", ret);
2394 return ret; 2495 return ret;
2395 } 2496 }
2396 2497
2397 while (wait_limit-- && 2498 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2398 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & 2499
2399 FW_IND_INITIALIZED)) { 2500 do {
2501 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2502
2503 ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2504
2505 /* target should never return this */
2506 if (val == 0xffffffff)
2507 continue;
2508
2509 /* the device has crashed so don't bother trying anymore */
2510 if (val & FW_IND_EVENT_PENDING)
2511 break;
2512
2513 if (val & FW_IND_INITIALIZED)
2514 break;
2515
2400 if (ar_pci->num_msi_intrs == 0) 2516 if (ar_pci->num_msi_intrs == 0)
2401 /* Fix potential race by repeating CORE_BASE writes */ 2517 /* Fix potential race by repeating CORE_BASE writes */
2402 iowrite32(PCIE_INTR_FIRMWARE_MASK | 2518 ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2403 PCIE_INTR_CE_MASK_ALL, 2519 PCIE_INTR_FIRMWARE_MASK |
2404 ar_pci->mem + (SOC_CORE_BASE_ADDRESS | 2520 PCIE_INTR_CE_MASK_ALL);
2405 PCIE_INTR_ENABLE_ADDRESS)); 2521
2406 mdelay(10); 2522 mdelay(10);
2407 } 2523 } while (time_before(jiffies, timeout));
2408 2524
2409 if (wait_limit < 0) { 2525 if (val == 0xffffffff) {
2410 ath10k_err("target stalled\n"); 2526 ath10k_err("failed to read device register, device is gone\n");
2411 ret = -EIO; 2527 ret = -EIO;
2412 goto out; 2528 goto out;
2413 } 2529 }
2414 2530
2531 if (val & FW_IND_EVENT_PENDING) {
2532 ath10k_warn("device has crashed during init\n");
2533 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2534 val & ~FW_IND_EVENT_PENDING);
2535 ath10k_pci_hif_dump_area(ar);
2536 ret = -ECOMM;
2537 goto out;
2538 }
2539
2540 if (!(val & FW_IND_INITIALIZED)) {
2541 ath10k_err("failed to receive initialized event from target: %08x\n",
2542 val);
2543 ret = -ETIMEDOUT;
2544 goto out;
2545 }
2546
2547 ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2548
2415out: 2549out:
2416 ath10k_pci_sleep(ar); 2550 ath10k_pci_sleep(ar);
2417 return ret; 2551 return ret;
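The rewritten wait loop replaces a fixed 300-iteration counter with a real deadline (jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT), checked with time_before()) and then classifies the final indicator value: all-ones means the device dropped off the bus (-EIO), a pending event means it crashed during init (-ECOMM), and anything short of FW_IND_INITIALIZED is a timeout (-ETIMEDOUT). A userspace analogue using CLOCK_MONOTONIC; the bit values and the instantly-successful read_indicator() are stand-ins:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define IND_EVENT_PENDING (1u << 0)
    #define IND_INITIALIZED   (1u << 1)
    #define WAIT_MS 3000

    static uint32_t read_indicator(void)
    {
        return IND_INITIALIZED;   /* pretend the target came up immediately */
    }

    static double now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
    }

    static int wait_for_init(void)
    {
        double deadline = now_ms() + WAIT_MS;
        uint32_t val = 0;

        do {
            val = read_indicator();
            if (val == 0xffffffff)
                continue;          /* bus returned all-ones: keep sampling */
            if (val & (IND_EVENT_PENDING | IND_INITIALIZED))
                break;
        } while (now_ms() < deadline);

        if (val == 0xffffffff)
            return -5;             /* device gone, like -EIO */
        if (val & IND_EVENT_PENDING)
            return -70;            /* crashed during init, like -ECOMM */
        if (!(val & IND_INITIALIZED))
            return -110;           /* like -ETIMEDOUT */
        return 0;
    }

    int main(void)
    {
        printf("wait_for_init: %d\n", wait_for_init());
        return 0;
    }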
@@ -2422,6 +2556,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
2422 int i, ret; 2556 int i, ret;
2423 u32 val; 2557 u32 val;
2424 2558
2559 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2560
2425 ret = ath10k_do_pci_wake(ar); 2561 ret = ath10k_do_pci_wake(ar);
2426 if (ret) { 2562 if (ret) {
2427 ath10k_err("failed to wake up target: %d\n", 2563 ath10k_err("failed to wake up target: %d\n",
@@ -2453,6 +2589,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
2453 } 2589 }
2454 2590
2455 ath10k_do_pci_sleep(ar); 2591 ath10k_do_pci_sleep(ar);
2592
2593 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
2594
2456 return 0; 2595 return 0;
2457} 2596}
2458 2597
@@ -2484,7 +2623,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2484 struct ath10k_pci *ar_pci; 2623 struct ath10k_pci *ar_pci;
2485 u32 lcr_val, chip_id; 2624 u32 lcr_val, chip_id;
2486 2625
2487 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 2626 ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2488 2627
2489 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); 2628 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2490 if (ar_pci == NULL) 2629 if (ar_pci == NULL)
@@ -2503,7 +2642,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2503 goto err_ar_pci; 2642 goto err_ar_pci;
2504 } 2643 }
2505 2644
2506 if (ath10k_target_ps) 2645 if (ath10k_pci_target_ps)
2507 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); 2646 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2508 2647
2509 ath10k_pci_dump_features(ar_pci); 2648 ath10k_pci_dump_features(ar_pci);
@@ -2516,23 +2655,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2516 } 2655 }
2517 2656
2518 ar_pci->ar = ar; 2657 ar_pci->ar = ar;
2519 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2520 atomic_set(&ar_pci->keep_awake_count, 0); 2658 atomic_set(&ar_pci->keep_awake_count, 0);
2521 2659
2522 pci_set_drvdata(pdev, ar); 2660 pci_set_drvdata(pdev, ar);
2523 2661
2524 /*
2525 * Without any knowledge of the Host, the Target may have been reset or
2526 * power cycled and its Config Space may no longer reflect the PCI
2527 * address space that was assigned earlier by the PCI infrastructure.
2528 * Refresh it now.
2529 */
2530 ret = pci_assign_resource(pdev, BAR_NUM);
2531 if (ret) {
2532 ath10k_err("failed to assign PCI space: %d\n", ret);
2533 goto err_ar;
2534 }
2535
2536 ret = pci_enable_device(pdev); 2662 ret = pci_enable_device(pdev);
2537 if (ret) { 2663 if (ret) {
2538 ath10k_err("failed to enable PCI device: %d\n", ret); 2664 ath10k_err("failed to enable PCI device: %d\n", ret);
@@ -2594,16 +2720,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2594 2720
2595 ath10k_do_pci_sleep(ar); 2721 ath10k_do_pci_sleep(ar);
2596 2722
2723 ret = ath10k_pci_alloc_ce(ar);
2724 if (ret) {
2725 ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
2726 goto err_iomap;
2727 }
2728
2597 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); 2729 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2598 2730
2599 ret = ath10k_core_register(ar, chip_id); 2731 ret = ath10k_core_register(ar, chip_id);
2600 if (ret) { 2732 if (ret) {
2601 ath10k_err("failed to register driver core: %d\n", ret); 2733 ath10k_err("failed to register driver core: %d\n", ret);
2602 goto err_iomap; 2734 goto err_free_ce;
2603 } 2735 }
2604 2736
2605 return 0; 2737 return 0;
2606 2738
2739err_free_ce:
2740 ath10k_pci_free_ce(ar);
2607err_iomap: 2741err_iomap:
2608 pci_iounmap(pdev, mem); 2742 pci_iounmap(pdev, mem);
2609err_master: 2743err_master:
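The new err_free_ce label slots into probe's goto chain so that a failing ath10k_core_register() releases the copy-engine pipes before the older labels run: unwind order stays the exact reverse of setup order. A minimal sketch of the idiom with invented step/undo names:

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }   /* pretend the last step fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int probe(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            goto err;
        ret = step_b();
        if (ret)
            goto err_undo_a;
        ret = step_c();
        if (ret)
            goto err_undo_b;     /* newest label: reverse of setup order */

        return 0;

    err_undo_b:
        undo_b();
    err_undo_a:
        undo_a();
    err:
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }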
@@ -2626,7 +2760,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
2626 struct ath10k *ar = pci_get_drvdata(pdev); 2760 struct ath10k *ar = pci_get_drvdata(pdev);
2627 struct ath10k_pci *ar_pci; 2761 struct ath10k_pci *ar_pci;
2628 2762
2629 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); 2763 ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2630 2764
2631 if (!ar) 2765 if (!ar)
2632 return; 2766 return;
@@ -2636,9 +2770,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
2636 if (!ar_pci) 2770 if (!ar_pci)
2637 return; 2771 return;
2638 2772
2639 tasklet_kill(&ar_pci->msi_fw_err);
2640
2641 ath10k_core_unregister(ar); 2773 ath10k_core_unregister(ar);
2774 ath10k_pci_free_ce(ar);
2642 2775
2643 pci_iounmap(pdev, ar_pci->mem); 2776 pci_iounmap(pdev, ar_pci->mem);
2644 pci_release_region(pdev, BAR_NUM); 2777 pci_release_region(pdev, BAR_NUM);
@@ -2680,6 +2813,5 @@ module_exit(ath10k_pci_exit);
2680MODULE_AUTHOR("Qualcomm Atheros"); 2813MODULE_AUTHOR("Qualcomm Atheros");
2681MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); 2814MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2682MODULE_LICENSE("Dual BSD/GPL"); 2815MODULE_LICENSE("Dual BSD/GPL");
2683MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); 2816MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
2684MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2685MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 2817MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index b43fdb4f7319..dfdebb4157aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -189,9 +189,6 @@ struct ath10k_pci {
189 189
190 struct ath10k_hif_cb msg_callbacks_current; 190 struct ath10k_hif_cb msg_callbacks_current;
191 191
192 /* Target address used to signal a pending firmware event */
193 u32 fw_indicator_address;
194
195 /* Copy Engine used for Diagnostic Accesses */ 192 /* Copy Engine used for Diagnostic Accesses */
196 struct ath10k_ce_pipe *ce_diag; 193 struct ath10k_ce_pipe *ce_diag;
197 194
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 0541dd939ce9..82669a77e553 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -100,189 +100,6 @@ exit:
100 wake_up(&htt->empty_tx_wq); 100 wake_up(&htt->empty_tx_wq);
101} 101}
102 102
103static const u8 rx_legacy_rate_idx[] = {
104 3, /* 0x00 - 11Mbps */
105 2, /* 0x01 - 5.5Mbps */
106 1, /* 0x02 - 2Mbps */
107 0, /* 0x03 - 1Mbps */
108 3, /* 0x04 - 11Mbps */
109 2, /* 0x05 - 5.5Mbps */
110 1, /* 0x06 - 2Mbps */
111 0, /* 0x07 - 1Mbps */
112 10, /* 0x08 - 48Mbps */
113 8, /* 0x09 - 24Mbps */
114 6, /* 0x0A - 12Mbps */
115 4, /* 0x0B - 6Mbps */
116 11, /* 0x0C - 54Mbps */
117 9, /* 0x0D - 36Mbps */
118 7, /* 0x0E - 18Mbps */
119 5, /* 0x0F - 9Mbps */
120};
121
122static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
123 enum ieee80211_band band,
124 struct ieee80211_rx_status *status)
125{
126 u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
127 u8 info0 = info->rate.info0;
128 u32 info1 = info->rate.info1;
129 u32 info2 = info->rate.info2;
130 u8 preamble = 0;
131
132 /* Check if valid fields */
133 if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
134 return;
135
136 preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
137
138 switch (preamble) {
139 case HTT_RX_LEGACY:
140 cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
141 rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
142 rate_idx = 0;
143
144 if (rate < 0x08 || rate > 0x0F)
145 break;
146
147 switch (band) {
148 case IEEE80211_BAND_2GHZ:
149 if (cck)
150 rate &= ~BIT(3);
151 rate_idx = rx_legacy_rate_idx[rate];
152 break;
153 case IEEE80211_BAND_5GHZ:
154 rate_idx = rx_legacy_rate_idx[rate];
155 /* We are using same rate table registering
156 HW - ath10k_rates[]. In case of 5GHz skip
157 CCK rates, so -4 here */
158 rate_idx -= 4;
159 break;
160 default:
161 break;
162 }
163
164 status->rate_idx = rate_idx;
165 break;
166 case HTT_RX_HT:
167 case HTT_RX_HT_WITH_TXBF:
168 /* HT-SIG - Table 20-11 in info1 and info2 */
169 mcs = info1 & 0x1F;
170 nss = mcs >> 3;
171 bw = (info1 >> 7) & 1;
172 sgi = (info2 >> 7) & 1;
173
174 status->rate_idx = mcs;
175 status->flag |= RX_FLAG_HT;
176 if (sgi)
177 status->flag |= RX_FLAG_SHORT_GI;
178 if (bw)
179 status->flag |= RX_FLAG_40MHZ;
180 break;
181 case HTT_RX_VHT:
182 case HTT_RX_VHT_WITH_TXBF:
183 /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
184 TODO check this */
185 mcs = (info2 >> 4) & 0x0F;
186 nss = ((info1 >> 10) & 0x07) + 1;
187 bw = info1 & 3;
188 sgi = info2 & 1;
189
190 status->rate_idx = mcs;
191 status->vht_nss = nss;
192
193 if (sgi)
194 status->flag |= RX_FLAG_SHORT_GI;
195
196 switch (bw) {
197 /* 20MHZ */
198 case 0:
199 break;
200 /* 40MHZ */
201 case 1:
202 status->flag |= RX_FLAG_40MHZ;
203 break;
204 /* 80MHZ */
205 case 2:
206 status->vht_flag |= RX_VHT_FLAG_80MHZ;
207 }
208
209 status->flag |= RX_FLAG_VHT;
210 break;
211 default:
212 break;
213 }
214}
215
216void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
217{
218 struct ieee80211_rx_status *status;
219 struct ieee80211_channel *ch;
220 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
221
222 status = IEEE80211_SKB_RXCB(info->skb);
223 memset(status, 0, sizeof(*status));
224
225 if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
226 status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
227 RX_FLAG_MMIC_STRIPPED;
228 hdr->frame_control = __cpu_to_le16(
229 __le16_to_cpu(hdr->frame_control) &
230 ~IEEE80211_FCTL_PROTECTED);
231 }
232
233 if (info->mic_err)
234 status->flag |= RX_FLAG_MMIC_ERROR;
235
236 if (info->fcs_err)
237 status->flag |= RX_FLAG_FAILED_FCS_CRC;
238
239 if (info->amsdu_more)
240 status->flag |= RX_FLAG_AMSDU_MORE;
241
242 status->signal = info->signal;
243
244 spin_lock_bh(&ar->data_lock);
245 ch = ar->scan_channel;
246 if (!ch)
247 ch = ar->rx_channel;
248 spin_unlock_bh(&ar->data_lock);
249
250 if (!ch) {
251 ath10k_warn("no channel configured; ignoring frame!\n");
252 dev_kfree_skb_any(info->skb);
253 return;
254 }
255
256 process_rx_rates(ar, info, ch->band, status);
257 status->band = ch->band;
258 status->freq = ch->center_freq;
259
260 if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
261 /* TSF available only in 32-bit */
262 status->mactime = info->tsf & 0xffffffff;
263 status->flag |= RX_FLAG_MACTIME_END;
264 }
265
266 ath10k_dbg(ATH10K_DBG_DATA,
267 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
268 info->skb,
269 info->skb->len,
270 status->flag == 0 ? "legacy" : "",
271 status->flag & RX_FLAG_HT ? "ht" : "",
272 status->flag & RX_FLAG_VHT ? "vht" : "",
273 status->flag & RX_FLAG_40MHZ ? "40" : "",
274 status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
275 status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
276 status->rate_idx,
277 status->vht_nss,
278 status->freq,
279 status->band, status->flag, info->fcs_err);
280 ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
281 info->skb->data, info->skb->len);
282
283 ieee80211_rx(ar->hw, info->skb);
284}
285
286struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, 103struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
287 const u8 *addr) 104 const u8 *addr)
288{ 105{
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index 356dc9c04c9e..aee3e20058f8 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -21,7 +21,6 @@
21 21
22void ath10k_txrx_tx_unref(struct ath10k_htt *htt, 22void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
23 const struct htt_tx_done *tx_done); 23 const struct htt_tx_done *tx_done);
24void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
25 24
26struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, 25struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
27 const u8 *addr); 26 const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb1f7b5bcf4c..4b7782a529ac 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -639,6 +639,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
639 struct sk_buff *wmi_skb; 639 struct sk_buff *wmi_skb;
640 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 640 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
641 int len; 641 int len;
642 u32 buf_len = skb->len;
642 u16 fc; 643 u16 fc;
643 644
644 hdr = (struct ieee80211_hdr *)skb->data; 645 hdr = (struct ieee80211_hdr *)skb->data;
@@ -648,6 +649,15 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
648 return -EINVAL; 649 return -EINVAL;
649 650
650 len = sizeof(cmd->hdr) + skb->len; 651 len = sizeof(cmd->hdr) + skb->len;
652
653 if ((ieee80211_is_action(hdr->frame_control) ||
654 ieee80211_is_deauth(hdr->frame_control) ||
655 ieee80211_is_disassoc(hdr->frame_control)) &&
656 ieee80211_has_protected(hdr->frame_control)) {
657 len += IEEE80211_CCMP_MIC_LEN;
658 buf_len += IEEE80211_CCMP_MIC_LEN;
659 }
660
651 len = round_up(len, 4); 661 len = round_up(len, 4);
652 662
653 wmi_skb = ath10k_wmi_alloc_skb(len); 663 wmi_skb = ath10k_wmi_alloc_skb(len);
@@ -659,7 +669,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
659 cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id); 669 cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
660 cmd->hdr.tx_rate = 0; 670 cmd->hdr.tx_rate = 0;
661 cmd->hdr.tx_power = 0; 671 cmd->hdr.tx_power = 0;
662 cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len)); 672 cmd->hdr.buf_len = __cpu_to_le32(buf_len);
663 673
664 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); 674 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
665 memcpy(cmd->buf, skb->data, skb->len); 675 memcpy(cmd->buf, skb->data, skb->len);
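For protected action, deauth and disassoc frames the firmware appends the CCMP MIC itself, so the command now reserves IEEE80211_CCMP_MIC_LEN extra bytes in both the WMI buffer and the advertised buf_len, and the total is still rounded up to a 4-byte boundary. A sketch of the length computation, with the 8-byte MIC and the round_up() arithmetic mirrored in plain C:

    #include <stdbool.h>
    #include <stdio.h>

    #define CCMP_MIC_LEN 8
    #define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    static unsigned int wmi_tx_len(unsigned int hdr_len, unsigned int frame_len,
                                   bool is_robust_mgmt, bool is_protected)
    {
        unsigned int len = hdr_len + frame_len;

        /* protected action/deauth/disassoc: leave room for the MIC the
         * firmware will append */
        if (is_robust_mgmt && is_protected)
            len += CCMP_MIC_LEN;

        return ROUND_UP(len, 4);   /* WMI buffers are 4-byte aligned */
    }

    int main(void)
    {
        printf("%u\n", wmi_tx_len(16, 61, true, true));   /* 16+61+8 -> 88 */
        printf("%u\n", wmi_tx_len(16, 61, false, false)); /* 16+61   -> 80 */
        return 0;
    }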
@@ -957,10 +967,16 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
957 * frames with Protected Bit set. */ 967 * frames with Protected Bit set. */
958 if (ieee80211_has_protected(hdr->frame_control) && 968 if (ieee80211_has_protected(hdr->frame_control) &&
959 !ieee80211_is_auth(hdr->frame_control)) { 969 !ieee80211_is_auth(hdr->frame_control)) {
960 status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | 970 status->flag |= RX_FLAG_DECRYPTED;
961 RX_FLAG_MMIC_STRIPPED; 971
962 hdr->frame_control = __cpu_to_le16(fc & 972 if (!ieee80211_is_action(hdr->frame_control) &&
973 !ieee80211_is_deauth(hdr->frame_control) &&
974 !ieee80211_is_disassoc(hdr->frame_control)) {
975 status->flag |= RX_FLAG_IV_STRIPPED |
976 RX_FLAG_MMIC_STRIPPED;
977 hdr->frame_control = __cpu_to_le16(fc &
963 ~IEEE80211_FCTL_PROTECTED); 978 ~IEEE80211_FCTL_PROTECTED);
979 }
964 } 980 }
965 981
966 ath10k_dbg(ATH10K_DBG_MGMT, 982 ath10k_dbg(ATH10K_DBG_MGMT,
@@ -1362,13 +1378,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1362 struct sk_buff *bcn; 1378 struct sk_buff *bcn;
1363 int ret, vdev_id = 0; 1379 int ret, vdev_id = 0;
1364 1380
1365 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
1366
1367 ev = (struct wmi_host_swba_event *)skb->data; 1381 ev = (struct wmi_host_swba_event *)skb->data;
1368 map = __le32_to_cpu(ev->vdev_map); 1382 map = __le32_to_cpu(ev->vdev_map);
1369 1383
1370 ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n" 1384 ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
1371 "-vdev map 0x%x\n",
1372 ev->vdev_map); 1385 ev->vdev_map);
1373 1386
1374 for (; map; map >>= 1, vdev_id++) { 1387 for (; map; map >>= 1, vdev_id++) {
@@ -1385,12 +1398,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1385 bcn_info = &ev->bcn_info[i]; 1398 bcn_info = &ev->bcn_info[i];
1386 1399
1387 ath10k_dbg(ATH10K_DBG_MGMT, 1400 ath10k_dbg(ATH10K_DBG_MGMT,
1388 "-bcn_info[%d]:\n" 1401 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
1389 "--tim_len %d\n"
1390 "--tim_mcast %d\n"
1391 "--tim_changed %d\n"
1392 "--tim_num_ps_pending %d\n"
1393 "--tim_bitmap 0x%08x%08x%08x%08x\n",
1394 i, 1402 i,
1395 __le32_to_cpu(bcn_info->tim_info.tim_len), 1403 __le32_to_cpu(bcn_info->tim_info.tim_len),
1396 __le32_to_cpu(bcn_info->tim_info.tim_mcast), 1404 __le32_to_cpu(bcn_info->tim_info.tim_mcast),
@@ -1439,6 +1447,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1439 ATH10K_SKB_CB(arvif->beacon)->paddr, 1447 ATH10K_SKB_CB(arvif->beacon)->paddr,
1440 arvif->beacon->len, DMA_TO_DEVICE); 1448 arvif->beacon->len, DMA_TO_DEVICE);
1441 dev_kfree_skb_any(arvif->beacon); 1449 dev_kfree_skb_any(arvif->beacon);
1450 arvif->beacon = NULL;
1442 } 1451 }
1443 1452
1444 ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev, 1453 ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
@@ -1448,6 +1457,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1448 ATH10K_SKB_CB(bcn)->paddr); 1457 ATH10K_SKB_CB(bcn)->paddr);
1449 if (ret) { 1458 if (ret) {
1450 ath10k_warn("failed to map beacon: %d\n", ret); 1459 ath10k_warn("failed to map beacon: %d\n", ret);
1460 dev_kfree_skb_any(bcn);
1451 goto skip; 1461 goto skip;
1452 } 1462 }
1453 1463
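The two one-line SWBA fixes close a use-after-free (arvif->beacon must be NULLed after the old beacon is freed) and a leak (the new beacon must be freed when DMA mapping fails). A compact sketch of the free-and-null plus free-on-error pattern; dma_map() is faked to always fail so the error path runs:

    #include <stdio.h>
    #include <stdlib.h>

    struct vif { char *beacon; };

    static int dma_map(char *buf) { (void)buf; return -1; /* pretend failure */ }

    static int install_beacon(struct vif *vif, char *bcn)
    {
        if (vif->beacon) {
            free(vif->beacon);
            vif->beacon = NULL;   /* the fix: no stale pointer if we bail out */
        }

        if (dma_map(bcn)) {
            free(bcn);            /* the other fix: don't leak on map failure */
            return -1;
        }

        vif->beacon = bcn;
        return 0;
    }

    int main(void)
    {
        struct vif vif = { .beacon = malloc(64) };
        char *bcn = malloc(64);

        if (install_beacon(&vif, bcn))
            fprintf(stderr, "beacon install failed; no leak, no dangling ptr\n");
        return 0;
    }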
@@ -2365,7 +2375,7 @@ void ath10k_wmi_detach(struct ath10k *ar)
2365 ar->wmi.num_mem_chunks = 0; 2375 ar->wmi.num_mem_chunks = 0;
2366} 2376}
2367 2377
2368int ath10k_wmi_connect_htc_service(struct ath10k *ar) 2378int ath10k_wmi_connect(struct ath10k *ar)
2369{ 2379{
2370 int status; 2380 int status;
2371 struct ath10k_htc_svc_conn_req conn_req; 2381 struct ath10k_htc_svc_conn_req conn_req;
@@ -2393,8 +2403,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
2393 return 0; 2403 return 0;
2394} 2404}
2395 2405
2396int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 2406static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2397 u16 rd5g, u16 ctl2g, u16 ctl5g) 2407 u16 rd2g, u16 rd5g, u16 ctl2g,
2408 u16 ctl5g)
2398{ 2409{
2399 struct wmi_pdev_set_regdomain_cmd *cmd; 2410 struct wmi_pdev_set_regdomain_cmd *cmd;
2400 struct sk_buff *skb; 2411 struct sk_buff *skb;
@@ -2418,6 +2429,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
2418 ar->wmi.cmd->pdev_set_regdomain_cmdid); 2429 ar->wmi.cmd->pdev_set_regdomain_cmdid);
2419} 2430}
2420 2431
2432static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2433 u16 rd2g, u16 rd5g,
2434 u16 ctl2g, u16 ctl5g,
2435 enum wmi_dfs_region dfs_reg)
2436{
2437 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
2438 struct sk_buff *skb;
2439
2440 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2441 if (!skb)
2442 return -ENOMEM;
2443
2444 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
2445 cmd->reg_domain = __cpu_to_le32(rd);
2446 cmd->reg_domain_2G = __cpu_to_le32(rd2g);
2447 cmd->reg_domain_5G = __cpu_to_le32(rd5g);
2448 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
2449 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
2450 cmd->dfs_domain = __cpu_to_le32(dfs_reg);
2451
2452 ath10k_dbg(ATH10K_DBG_WMI,
2453 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
2454 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
2455
2456 return ath10k_wmi_cmd_send(ar, skb,
2457 ar->wmi.cmd->pdev_set_regdomain_cmdid);
2458}
2459
2460int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
2461 u16 rd5g, u16 ctl2g, u16 ctl5g,
2462 enum wmi_dfs_region dfs_reg)
2463{
2464 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
2465 return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
2466 ctl2g, ctl5g, dfs_reg);
2467 else
2468 return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
2469 ctl2g, ctl5g);
2470}
2471
2421int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 2472int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2422 const struct wmi_channel_arg *arg) 2473 const struct wmi_channel_arg *arg)
2423{ 2474{
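ath10k_wmi_pdev_set_regdomain() becomes a thin dispatcher: firmware that advertises the WMI 10.x feature bit gets the extended command carrying dfs_domain, everything else gets the original layout. A sketch of picking a wire format from a feature flag; the struct contents are abbreviated stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    enum fw_feature { FW_FEATURE_WMI_10X = 1u << 0 };

    struct regd_cmd     { uint32_t rd, ctl2g, ctl5g; };
    struct regd_cmd_10x { uint32_t rd, ctl2g, ctl5g, dfs_domain; };

    static int send_main(uint32_t rd, uint32_t c2, uint32_t c5)
    {
        struct regd_cmd cmd = { rd, c2, c5 };
        printf("main cmd, %zu bytes\n", sizeof(cmd));
        return 0;
    }

    static int send_10x(uint32_t rd, uint32_t c2, uint32_t c5, uint32_t dfs)
    {
        struct regd_cmd_10x cmd = { rd, c2, c5, dfs };
        printf("10x cmd, %zu bytes, dfs=%u\n", sizeof(cmd),
               (unsigned)cmd.dfs_domain);
        return 0;
    }

    static int set_regdomain(uint32_t features, uint32_t rd,
                             uint32_t c2, uint32_t c5, uint32_t dfs)
    {
        if (features & FW_FEATURE_WMI_10X)
            return send_10x(rd, c2, c5, dfs);
        return send_main(rd, c2, c5);
    }

    int main(void)
    {
        set_regdomain(0, 0x37, 0x10, 0x10, 2);
        set_regdomain(FW_FEATURE_WMI_10X, 0x37, 0x10, 0x10, 2);
        return 0;
    }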
@@ -3456,8 +3507,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
3456 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 3507 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
3457 3508
3458 ath10k_dbg(ATH10K_DBG_WMI, 3509 ath10k_dbg(ATH10K_DBG_WMI,
3459 "wmi peer assoc vdev %d addr %pM\n", 3510 "wmi peer assoc vdev %d addr %pM (%s)\n",
3460 arg->vdev_id, arg->addr); 3511 arg->vdev_id, arg->addr,
3512 arg->peer_reassoc ? "reassociate" : "new");
3461 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); 3513 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
3462} 3514}
3463 3515
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f51d5ca0141f..e93df2c10413 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -198,16 +198,6 @@ struct wmi_mac_addr {
198 } __packed; 198 } __packed;
199} __packed; 199} __packed;
200 200
201/* macro to convert MAC address from WMI word format to char array */
202#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
203 (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
204 (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
205 (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
206 (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
207 (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
208 (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
209 } while (0)
210
211struct wmi_cmd_map { 201struct wmi_cmd_map {
212 u32 init_cmdid; 202 u32 init_cmdid;
213 u32 start_scan_cmdid; 203 u32 start_scan_cmdid;
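The deleted WMI_MAC_ADDR_TO_CHAR_ARRAY macro unpacked a MAC address that the WMI wire format packs into two little-endian 32-bit words: four bytes in word0, two in word1. For reference, the same unpack written as a plain, runnable C function:

    #include <stdint.h>
    #include <stdio.h>

    struct wmi_mac_addr { uint32_t word0, word1; };

    static void wmi_mac_to_bytes(const struct wmi_mac_addr *m, uint8_t out[6])
    {
        out[0] =  m->word0        & 0xff;   /* word0 carries bytes 0..3 */
        out[1] = (m->word0 >> 8)  & 0xff;
        out[2] = (m->word0 >> 16) & 0xff;
        out[3] = (m->word0 >> 24) & 0xff;
        out[4] =  m->word1        & 0xff;   /* word1 carries bytes 4..5 */
        out[5] = (m->word1 >> 8)  & 0xff;
    }

    int main(void)
    {
        struct wmi_mac_addr m = { 0x44332211, 0x00006655 };
        uint8_t mac[6];

        wmi_mac_to_bytes(&m, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;                           /* prints 11:22:33:44:55:66 */
    }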
@@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
2185 __le32 conformance_test_limit_5G; 2175 __le32 conformance_test_limit_5G;
2186} __packed; 2176} __packed;
2187 2177
2178enum wmi_dfs_region {
2179 /* Uninitialized dfs domain */
2180 WMI_UNINIT_DFS_DOMAIN = 0,
2181
2182 /* FCC3 dfs domain */
2183 WMI_FCC_DFS_DOMAIN = 1,
2184
2185 /* ETSI dfs domain */
2186 WMI_ETSI_DFS_DOMAIN = 2,
2187
2188 /* Japan dfs domain */
2189 WMI_MKK4_DFS_DOMAIN = 3,
2190};
2191
2192struct wmi_pdev_set_regdomain_cmd_10x {
2193 __le32 reg_domain;
2194 __le32 reg_domain_2G;
2195 __le32 reg_domain_5G;
2196 __le32 conformance_test_limit_2G;
2197 __le32 conformance_test_limit_5G;
2198
2199 /* dfs domain from wmi_dfs_region */
2200 __le32 dfs_domain;
2201} __packed;
2202
2188/* Command to set/unset chip in quiet mode */ 2203/* Command to set/unset chip in quiet mode */
2189struct wmi_pdev_set_quiet_cmd { 2204struct wmi_pdev_set_quiet_cmd {
2190 /* period in TUs */ 2205 /* period in TUs */
@@ -2210,6 +2225,19 @@ enum ath10k_protmode {
2210 ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */ 2225 ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
2211}; 2226};
2212 2227
2228enum wmi_rtscts_profile {
2229 WMI_RTSCTS_FOR_NO_RATESERIES = 0,
2230 WMI_RTSCTS_FOR_SECOND_RATESERIES,
2231 WMI_RTSCTS_ACROSS_SW_RETRIES
2232};
2233
2234#define WMI_RTSCTS_ENABLED 1
2235#define WMI_RTSCTS_SET_MASK 0x0f
2236#define WMI_RTSCTS_SET_LSB 0
2237
2238#define WMI_RTSCTS_PROFILE_MASK 0xf0
2239#define WMI_RTSCTS_PROFILE_LSB 4
2240
2213enum wmi_beacon_gen_mode { 2241enum wmi_beacon_gen_mode {
2214 WMI_BEACON_STAGGERED_MODE = 0, 2242 WMI_BEACON_STAGGERED_MODE = 0,
2215 WMI_BEACON_BURST_MODE = 1 2243 WMI_BEACON_BURST_MODE = 1
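The new WMI_RTSCTS_*_MASK/_LSB pairs follow the driver's usual mask-and-shift field convention: the enable flag sits in the low nibble of the parameter word and the profile in the high nibble. A sketch of packing and unpacking such a field with SM()/MS()-style helpers (reimplemented here for the demo):

    #include <stdio.h>

    #define RTSCTS_SET_MASK      0x0f
    #define RTSCTS_SET_LSB       0
    #define RTSCTS_PROFILE_MASK  0xf0
    #define RTSCTS_PROFILE_LSB   4

    /* SM/MS-style helpers, in the spirit of the driver's own macros */
    #define SM(v, field)  (((v) << field##_LSB) & field##_MASK)
    #define MS(v, field)  (((v) & field##_MASK) >> field##_LSB)

    int main(void)
    {
        unsigned param = SM(1u, RTSCTS_SET) | SM(2u, RTSCTS_PROFILE);

        printf("param=0x%02x set=%u profile=%u\n",
               param, MS(param, RTSCTS_SET), MS(param, RTSCTS_PROFILE));
        return 0;    /* param=0x21 set=1 profile=2 */
    }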
@@ -2295,9 +2323,9 @@ struct wmi_pdev_param_map {
2295#define WMI_PDEV_PARAM_UNSUPPORTED 0 2323#define WMI_PDEV_PARAM_UNSUPPORTED 0
2296 2324
2297enum wmi_pdev_param { 2325enum wmi_pdev_param {
2298 /* TX chian mask */ 2326 /* TX chain mask */
2299 WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, 2327 WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
2300 /* RX chian mask */ 2328 /* RX chain mask */
2301 WMI_PDEV_PARAM_RX_CHAIN_MASK, 2329 WMI_PDEV_PARAM_RX_CHAIN_MASK,
2302 /* TX power limit for 2G Radio */ 2330 /* TX power limit for 2G Radio */
2303 WMI_PDEV_PARAM_TXPOWER_LIMIT2G, 2331 WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
@@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
2682 /* wal pdev resets */ 2710 /* wal pdev resets */
2683 __le32 pdev_resets; 2711 __le32 pdev_resets;
2684 2712
2713 /* frames dropped due to non-availability of stateless TIDs */
2714 __le32 stateless_tid_alloc_failure;
2715
2685 __le32 phy_underrun; 2716 __le32 phy_underrun;
2686 2717
2687 /* MPDU is more than txop limit */ 2718 /* MPDU is more than txop limit */
@@ -2738,13 +2769,21 @@ enum wmi_stats_id {
2738 WMI_REQUEST_AP_STAT = 0x02 2769 WMI_REQUEST_AP_STAT = 0x02
2739}; 2770};
2740 2771
2772struct wlan_inst_rssi_args {
2773 __le16 cfg_retry_count;
2774 __le16 retry_count;
2775};
2776
2741struct wmi_request_stats_cmd { 2777struct wmi_request_stats_cmd {
2742 __le32 stats_id; 2778 __le32 stats_id;
2743 2779
2744 /* 2780 __le32 vdev_id;
2745 * Space to add parameters like 2781
2746 * peer mac addr 2782 /* peer MAC address */
2747 */ 2783 struct wmi_mac_addr peer_macaddr;
2784
2785 /* Instantaneous RSSI arguments */
2786 struct wlan_inst_rssi_args inst_rssi_args;
2748} __packed; 2787} __packed;
2749 2788
2750/* Suspend option */ 2789/* Suspend option */
@@ -2795,7 +2834,18 @@ struct wmi_stats_event {
2795 * PDEV statistics 2834 * PDEV statistics
2796 * TODO: add all PDEV stats here 2835 * TODO: add all PDEV stats here
2797 */ 2836 */
2798struct wmi_pdev_stats { 2837struct wmi_pdev_stats_old {
2838 __le32 chan_nf; /* Channel noise floor */
2839 __le32 tx_frame_count; /* TX frame count */
2840 __le32 rx_frame_count; /* RX frame count */
2841 __le32 rx_clear_count; /* rx clear count */
2842 __le32 cycle_count; /* cycle count */
2843 __le32 phy_err_count; /* Phy error count */
2844 __le32 chan_tx_pwr; /* channel tx power */
2845 struct wal_dbg_stats wal; /* WAL dbg stats */
2846} __packed;
2847
2848struct wmi_pdev_stats_10x {
2799 __le32 chan_nf; /* Channel noise floor */ 2849 __le32 chan_nf; /* Channel noise floor */
2800 __le32 tx_frame_count; /* TX frame count */ 2850 __le32 tx_frame_count; /* TX frame count */
2801 __le32 rx_frame_count; /* RX frame count */ 2851 __le32 rx_frame_count; /* RX frame count */
@@ -2804,6 +2854,12 @@ struct wmi_pdev_stats {
2804 __le32 phy_err_count; /* Phy error count */ 2854 __le32 phy_err_count; /* Phy error count */
2805 __le32 chan_tx_pwr; /* channel tx power */ 2855 __le32 chan_tx_pwr; /* channel tx power */
2806 struct wal_dbg_stats wal; /* WAL dbg stats */ 2856 struct wal_dbg_stats wal; /* WAL dbg stats */
2857 __le32 ack_rx_bad;
2858 __le32 rts_bad;
2859 __le32 rts_good;
2860 __le32 fcs_bad;
2861 __le32 no_beacons;
2862 __le32 mib_int_count;
2807} __packed; 2863} __packed;
2808 2864
2809/* 2865/*
@@ -2818,10 +2874,17 @@ struct wmi_vdev_stats {
2818 * peer statistics. 2874 * peer statistics.
2819 * TODO: add more stats 2875 * TODO: add more stats
2820 */ 2876 */
2821struct wmi_peer_stats { 2877struct wmi_peer_stats_old {
2878 struct wmi_mac_addr peer_macaddr;
2879 __le32 peer_rssi;
2880 __le32 peer_tx_rate;
2881} __packed;
2882
2883struct wmi_peer_stats_10x {
2822 struct wmi_mac_addr peer_macaddr; 2884 struct wmi_mac_addr peer_macaddr;
2823 __le32 peer_rssi; 2885 __le32 peer_rssi;
2824 __le32 peer_tx_rate; 2886 __le32 peer_tx_rate;
2887 __le32 peer_rx_rate;
2825} __packed; 2888} __packed;
2826 2889
2827struct wmi_vdev_create_cmd { 2890struct wmi_vdev_create_cmd {
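Splitting wmi_peer_stats into _old and _10x layouts (the 10.x one appends peer_rx_rate) implies the stats-event parser has to pick the record stride by firmware flavor before walking the array. A hedged sketch of stride-selected parsing over a shared prefix; the field set is reduced for the demo:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct peer_stats_old { uint32_t rssi, tx_rate; };
    struct peer_stats_10x { uint32_t rssi, tx_rate, rx_rate; };

    static void parse_peer_stats(const uint8_t *buf, int n, int is_10x)
    {
        size_t stride = is_10x ? sizeof(struct peer_stats_10x)
                               : sizeof(struct peer_stats_old);

        for (int i = 0; i < n; i++, buf += stride) {
            struct peer_stats_old s;    /* common prefix of both layouts */
            memcpy(&s, buf, sizeof(s));
            printf("peer %d: rssi=%u tx_rate=%u\n", i,
                   (unsigned)s.rssi, (unsigned)s.tx_rate);
        }
    }

    int main(void)
    {
        struct peer_stats_10x recs[2] = { {10, 54, 48}, {20, 36, 24} };
        parse_peer_stats((const uint8_t *)recs, 2, 1);
        return 0;
    }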
@@ -4196,13 +4259,14 @@ void ath10k_wmi_detach(struct ath10k *ar);
4196int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); 4259int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
4197int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); 4260int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
4198 4261
4199int ath10k_wmi_connect_htc_service(struct ath10k *ar); 4262int ath10k_wmi_connect(struct ath10k *ar);
4200int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 4263int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
4201 const struct wmi_channel_arg *); 4264 const struct wmi_channel_arg *);
4202int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt); 4265int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
4203int ath10k_wmi_pdev_resume_target(struct ath10k *ar); 4266int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
4204int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 4267int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
4205 u16 rd5g, u16 ctl2g, u16 ctl5g); 4268 u16 rd5g, u16 ctl2g, u16 ctl5g,
4269 enum wmi_dfs_region dfs_reg);
4206int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value); 4270int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
4207int ath10k_wmi_cmd_init(struct ath10k *ar); 4271int ath10k_wmi_cmd_init(struct ath10k *ar);
4208int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); 4272int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a2973b7acf2..0fce1c76638e 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3709 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP), 3709 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
3710 AR5K_TPC); 3710 AR5K_TPC);
3711 } else { 3711 } else {
3712 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX | 3712 ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
3713 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); 3713 AR5K_PHY_TXPOWER_RATE_MAX);
3714 } 3714 }
3715 3715
3716 return 0; 3716 return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index e39e5860a2e9..9c125ff083f7 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -1,11 +1,19 @@
1config ATH6KL 1config ATH6KL
2 tristate "Atheros mobile chipsets support" 2 tristate "Atheros mobile chipsets support"
3 depends on CFG80211
4 ---help---
5 This module adds core support for wireless adapters based on
6 Atheros AR6003 and AR6004 chipsets. You still need separate
7 bus drivers for USB and SDIO to be able to use real devices.
8
9 If you choose to build it as a module, it will be called
10 ath6kl_core. Please note that AR6002 and AR6001 are not
11 supported by this driver.
3 12
4config ATH6KL_SDIO 13config ATH6KL_SDIO
5 tristate "Atheros ath6kl SDIO support" 14 tristate "Atheros ath6kl SDIO support"
6 depends on ATH6KL 15 depends on ATH6KL
7 depends on MMC 16 depends on MMC
8 depends on CFG80211
9 ---help--- 17 ---help---
10 This module adds support for wireless adapters based on 18 This module adds support for wireless adapters based on
11 Atheros AR6003 and AR6004 chipsets running over SDIO. If you 19 Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@@ -17,25 +25,31 @@ config ATH6KL_USB
17 tristate "Atheros ath6kl USB support" 25 tristate "Atheros ath6kl USB support"
18 depends on ATH6KL 26 depends on ATH6KL
19 depends on USB 27 depends on USB
20 depends on CFG80211
21 ---help--- 28 ---help---
22 This module adds support for wireless adapters based on 29 This module adds support for wireless adapters based on
23 Atheros AR6004 chipset running over USB. This is still under 30 Atheros AR6004 chipset and chipsets based on it running over
24 implementation and it isn't functional. If you choose to 31 USB. If you choose to build it as a module, it will be
25 build it as a module, it will be called ath6kl_usb. 32 called ath6kl_usb.
26 33
27config ATH6KL_DEBUG 34config ATH6KL_DEBUG
28 bool "Atheros ath6kl debugging" 35 bool "Atheros ath6kl debugging"
29 depends on ATH6KL 36 depends on ATH6KL
30 ---help--- 37 ---help---
31 Enables debug support 38 Enables ath6kl debug support, including debug messages
39 enabled with the debug_mask module parameter and the
40 debugfs interface.
41
42 If unsure, say Y to make it easier to debug problems.
32 43
33config ATH6KL_TRACING 44config ATH6KL_TRACING
34 bool "Atheros ath6kl tracing support" 45 bool "Atheros ath6kl tracing support"
35 depends on ATH6KL 46 depends on ATH6KL
36 depends on EVENT_TRACING 47 depends on EVENT_TRACING
37 ---help--- 48 ---help---
38 Select this to ath6kl use tracing infrastructure. 49 Select this to let ath6kl use the tracing infrastructure,
50 which can be enabled, for example, with trace-cmd. All debug
51 messages and commands are delivered using individually
52 enablable trace points.
39 53
40 If unsure, say Y to make it easier to debug problems. 54 If unsure, say Y to make it easier to debug problems.
41 55
@@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
47 Enabling this makes it possible to change the regdomain in 61 Enabling this makes it possible to change the regdomain in
48 the firmware. This can be only enabled if regulatory requirements 62 the firmware. This can be only enabled if regulatory requirements
49 are taken into account. 63 are taken into account.
64
65 If unsure, say N.
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index c2c6f4604958..0e26f4a34fda 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
724 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 724 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
725 "added bss %pM to cfg80211\n", bssid); 725 "added bss %pM to cfg80211\n", bssid);
726 kfree(ie); 726 kfree(ie);
727 } else 727 } else {
728 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n"); 728 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
729 }
729 730
730 return bss; 731 return bss;
731} 732}
@@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
970 ssid_list[i].flag, 971 ssid_list[i].flag,
971 ssid_list[i].ssid.ssid_len, 972 ssid_list[i].ssid.ssid_len,
972 ssid_list[i].ssid.ssid); 973 ssid_list[i].ssid.ssid);
973
974 } 974 }
975 975
976 /* Make sure no old entries are left behind */ 976 /* Make sure no old entries are left behind */
@@ -1759,7 +1759,7 @@ static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
1759} 1759}
1760 1760
1761static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, 1761static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1762 u8 *mac, struct station_info *sinfo) 1762 const u8 *mac, struct station_info *sinfo)
1763{ 1763{
1764 struct ath6kl *ar = ath6kl_priv(dev); 1764 struct ath6kl *ar = ath6kl_priv(dev);
1765 struct ath6kl_vif *vif = netdev_priv(dev); 1765 struct ath6kl_vif *vif = netdev_priv(dev);
@@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
1897 1897
1898 /* Configure the patterns that we received from the user. */ 1898 /* Configure the patterns that we received from the user. */
1899 for (i = 0; i < wow->n_patterns; i++) { 1899 for (i = 0; i < wow->n_patterns; i++) {
1900
1901 /* 1900 /*
1902 * Convert given nl80211 specific mask value to equivalent 1901 * Convert given nl80211 specific mask value to equivalent
1903 * driver specific mask value and send it to the chip along 1902 * driver specific mask value and send it to the chip along
@@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2850 if (p.prwise_crypto_type == 0) { 2849 if (p.prwise_crypto_type == 0) {
2851 p.prwise_crypto_type = NONE_CRYPT; 2850 p.prwise_crypto_type = NONE_CRYPT;
2852 ath6kl_set_cipher(vif, 0, true); 2851 ath6kl_set_cipher(vif, 0, true);
2853 } else if (info->crypto.n_ciphers_pairwise == 1) 2852 } else if (info->crypto.n_ciphers_pairwise == 1) {
2854 ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true); 2853 ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
2854 }
2855 2855
2856 switch (info->crypto.cipher_group) { 2856 switch (info->crypto.cipher_group) {
2857 case WLAN_CIPHER_SUITE_WEP40: 2857 case WLAN_CIPHER_SUITE_WEP40:
@@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2897 } 2897 }
2898 2898
2899 if (info->inactivity_timeout) { 2899 if (info->inactivity_timeout) {
2900
2901 inactivity_timeout = info->inactivity_timeout; 2900 inactivity_timeout = info->inactivity_timeout;
2902 2901
2903 if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS) 2902 if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
@@ -2975,7 +2974,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
2975static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2974static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2976 2975
2977static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev, 2976static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
2978 u8 *mac) 2977 const u8 *mac)
2979{ 2978{
2980 struct ath6kl *ar = ath6kl_priv(dev); 2979 struct ath6kl *ar = ath6kl_priv(dev);
2981 struct ath6kl_vif *vif = netdev_priv(dev); 2980 struct ath6kl_vif *vif = netdev_priv(dev);
@@ -2986,7 +2985,8 @@ static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
2986} 2985}
2987 2986
2988static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, 2987static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
2989 u8 *mac, struct station_parameters *params) 2988 const u8 *mac,
2989 struct station_parameters *params)
2990{ 2990{
2991 struct ath6kl *ar = ath6kl_priv(dev); 2991 struct ath6kl *ar = ath6kl_priv(dev);
2992 struct ath6kl_vif *vif = netdev_priv(dev); 2992 struct ath6kl_vif *vif = netdev_priv(dev);
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4b46adbe8c92..b0b652042760 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
45module_param(recovery_enable, uint, 0644); 45module_param(recovery_enable, uint, 0644);
46module_param(heart_beat_poll, uint, 0644); 46module_param(heart_beat_poll, uint, 0644);
47MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error"); 47MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
48MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \ 48MODULE_PARM_DESC(heart_beat_poll,
49 "polling. This also specifies the polling interval in" \ 49 "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
50 "msecs. Set reocvery_enable for this to be effective"); 50
51 51
52void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb) 52void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
53{ 53{
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index dbfd17d0a5fa..55c4064dd506 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
172 struct ath6kl_irq_proc_registers *irq_proc_reg, 172 struct ath6kl_irq_proc_registers *irq_proc_reg,
173 struct ath6kl_irq_enable_reg *irq_enable_reg) 173 struct ath6kl_irq_enable_reg *irq_enable_reg)
174{ 174{
175
176 ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n")); 175 ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
177 176
178 if (irq_proc_reg != NULL) { 177 if (irq_proc_reg != NULL) {
@@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
219 "GMBOX lookahead alias 1: 0x%x\n", 218 "GMBOX lookahead alias 1: 0x%x\n",
220 irq_proc_reg->rx_gmbox_lkahd_alias[1]); 219 irq_proc_reg->rx_gmbox_lkahd_alias[1]);
221 } 220 }
222
223 } 221 }
224 222
225 if (irq_enable_reg != NULL) { 223 if (irq_enable_reg != NULL) {
@@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
1396 const char __user *user_buf, 1394 const char __user *user_buf,
1397 size_t count, loff_t *ppos) 1395 size_t count, loff_t *ppos)
1398{ 1396{
1399
1400 struct ath6kl *ar = file->private_data; 1397 struct ath6kl *ar = file->private_data;
1401 struct ath6kl_vif *vif; 1398 struct ath6kl_vif *vif;
1402 char buf[200]; 1399 char buf[200];
@@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
1575 const char __user *user_buf, 1572 const char __user *user_buf,
1576 size_t count, loff_t *ppos) 1573 size_t count, loff_t *ppos)
1577{ 1574{
1578
1579 struct ath6kl *ar = file->private_data; 1575 struct ath6kl *ar = file->private_data;
1580 struct ath6kl_vif *vif; 1576 struct ath6kl_vif *vif;
1581 char buf[100]; 1577 char buf[100];
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index ca9ba005f287..e194c10d9f00 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
97 struct ath6kl_irq_proc_registers *irq_proc_reg, 97 struct ath6kl_irq_proc_registers *irq_proc_reg,
98 struct ath6kl_irq_enable_reg *irq_en_reg) 98 struct ath6kl_irq_enable_reg *irq_en_reg)
99{ 99{
100
101} 100}
101
102static inline void dump_cred_dist_stats(struct htc_target *target) 102static inline void dump_cred_dist_stats(struct htc_target *target)
103{ 103{
104} 104}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index fea7709b5dda..18c070850a09 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
37 buf = req->virt_dma_buf; 37 buf = req->virt_dma_buf;
38 38
39 for (i = 0; i < req->scat_entries; i++) { 39 for (i = 0; i < req->scat_entries; i++) {
40
41 if (from_dma) 40 if (from_dma)
42 memcpy(req->scat_list[i].buf, buf, 41 memcpy(req->scat_list[i].buf, buf,
43 req->scat_list[i].len); 42 req->scat_list[i].len);
@@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
116 le32_to_cpu(regdump_val[i + 2]), 115 le32_to_cpu(regdump_val[i + 2]),
117 le32_to_cpu(regdump_val[i + 3])); 116 le32_to_cpu(regdump_val[i + 3]));
118 } 117 }
119
120} 118}
121 119
122static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) 120static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
701 699
702fail_setup: 700fail_setup:
703 return status; 701 return status;
704
705} 702}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 61f6b21fb0ae..dc6bd8cd9b83 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -197,9 +197,9 @@ struct hif_scatter_req {
197 /* bounce buffer for upper layers to copy to/from */ 197 /* bounce buffer for upper layers to copy to/from */
198 u8 *virt_dma_buf; 198 u8 *virt_dma_buf;
199 199
200 struct hif_scatter_item scat_list[1];
201
202 u32 scat_q_depth; 200 u32 scat_q_depth;
201
202 struct hif_scatter_item scat_list[0];
203}; 203};
204 204
205struct ath6kl_irq_proc_registers { 205struct ath6kl_irq_proc_registers {
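Moving scat_list to the end of hif_scatter_req and shrinking it from [1] to [0] turns it into a variable-length tail (a pre-C99 spelling of a flexible array member), so callers allocate the header and n scatter items in one contiguous block. A runnable sketch of that allocation pattern using the standard C99 [] form:

    #include <stdio.h>
    #include <stdlib.h>

    struct scat_item { void *buf; int len; };

    struct scat_req {
        int scat_entries;
        /* flexible array member: must be the last field */
        struct scat_item scat_list[];
    };

    static struct scat_req *scat_req_alloc(int n)
    {
        struct scat_req *req;

        req = calloc(1, sizeof(*req) + n * sizeof(req->scat_list[0]));
        if (req)
            req->scat_entries = n;
        return req;
    }

    int main(void)
    {
        struct scat_req *req = scat_req_alloc(4);

        if (!req)
            return 1;
        req->scat_list[3].len = 1500;   /* items live right after the header */
        printf("entries=%d last_len=%d\n", req->scat_entries,
               req->scat_list[3].len);
        free(req);
        return 0;
    }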
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 65e5b719093d..e481f14b9878 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
112 if (cur_ep_dist->endpoint == ENDPOINT_0) 112 if (cur_ep_dist->endpoint == ENDPOINT_0)
113 continue; 113 continue;
114 114
115 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) 115 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
116 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; 116 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
117 else { 117 } else {
118 /* 118 /*
119 * For the remaining data endpoints, we assume that 119 * For the remaining data endpoints, we assume that
120 * each cred_per_msg are the same. We use a simple 120 * each cred_per_msg are the same. We use a simple
@@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
129 count = (count * 3) >> 2; 129 count = (count * 3) >> 2;
130 count = max(count, cur_ep_dist->cred_per_msg); 130 count = max(count, cur_ep_dist->cred_per_msg);
131 cur_ep_dist->cred_norm = count; 131 cur_ep_dist->cred_norm = count;
132
133 } 132 }
134 133
135 ath6kl_dbg(ATH6KL_DBG_CREDIT, 134 ath6kl_dbg(ATH6KL_DBG_CREDIT,
@@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
549 enum htc_endpoint_id eid, unsigned int len, 548 enum htc_endpoint_id eid, unsigned int len,
550 int *req_cred) 549 int *req_cred)
551{ 550{
552
553 *req_cred = (len > target->tgt_cred_sz) ? 551 *req_cred = (len > target->tgt_cred_sz) ?
554 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; 552 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
555 553
@@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
608 unsigned int len; 606 unsigned int len;
609 607
610 while (true) { 608 while (true) {
611
612 flags = 0; 609 flags = 0;
613 610
614 if (list_empty(&endpoint->txq)) 611 if (list_empty(&endpoint->txq))
@@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
889 ac = target->dev->ar->ep2ac_map[endpoint->eid]; 886 ac = target->dev->ar->ep2ac_map[endpoint->eid];
890 887
891 while (true) { 888 while (true) {
892
893 if (list_empty(&endpoint->txq)) 889 if (list_empty(&endpoint->txq))
894 break; 890 break;
895 891
@@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
1190 list_add_tail(&packet->list, &container); 1186 list_add_tail(&packet->list, &container);
1191 htc_tx_complete(endpoint, &container); 1187 htc_tx_complete(endpoint, &container);
1192 } 1188 }
1193
1194} 1189}
1195 1190
1196static void ath6kl_htc_flush_txep_all(struct htc_target *target) 1191static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
1394 1389
1395 ep_cb = ep->ep_cb; 1390 ep_cb = ep->ep_cb;
1396 for (j = 0; j < n_msg; j++) { 1391 for (j = 0; j < n_msg; j++) {
1397
1398 /* 1392 /*
1399 * Reset flag, any packets allocated using the 1393 * Reset flag, any packets allocated using the
1400 * rx_alloc() API cannot be recycled on 1394 * rx_alloc() API cannot be recycled on
@@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
1424 } 1418 }
1425 } 1419 }
1426 1420
1427 if (list_empty(&ep->rx_bufq)) 1421 if (list_empty(&ep->rx_bufq)) {
1428 packet = NULL; 1422 packet = NULL;
1429 else { 1423 } else {
1430 packet = list_first_entry(&ep->rx_bufq, 1424 packet = list_first_entry(&ep->rx_bufq,
1431 struct htc_packet, list); 1425 struct htc_packet, list);
1432 list_del(&packet->list); 1426 list_del(&packet->list);
@@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
1487 spin_lock_bh(&target->rx_lock); 1481 spin_lock_bh(&target->rx_lock);
1488 1482
1489 for (i = 0; i < msg; i++) { 1483 for (i = 0; i < msg; i++) {
1490
1491 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i]; 1484 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1492 1485
1493 if (htc_hdr->eid >= ENDPOINT_MAX) { 1486 if (htc_hdr->eid >= ENDPOINT_MAX) {
@@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
1708 lk_ahd = (struct htc_lookahead_report *) record_buf; 1701 lk_ahd = (struct htc_lookahead_report *) record_buf;
1709 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) && 1702 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
1710 next_lk_ahds) { 1703 next_lk_ahds) {
1711
1712 ath6kl_dbg(ATH6KL_DBG_HTC, 1704 ath6kl_dbg(ATH6KL_DBG_HTC,
1713 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n", 1705 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
1714 lk_ahd->pre_valid, lk_ahd->post_valid); 1706 lk_ahd->pre_valid, lk_ahd->post_valid);
@@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
1755 } 1747 }
1756 1748
1757 return 0; 1749 return 0;
1758
1759} 1750}
1760 1751
1761static int htc_proc_trailer(struct htc_target *target, 1752static int htc_proc_trailer(struct htc_target *target,
@@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
1776 status = 0; 1767 status = 0;
1777 1768
1778 while (len > 0) { 1769 while (len > 0) {
1779
1780 if (len < sizeof(struct htc_record_hdr)) { 1770 if (len < sizeof(struct htc_record_hdr)) {
1781 status = -ENOMEM; 1771 status = -ENOMEM;
1782 break; 1772 break;
@@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
2098 } 2088 }
2099 2089
2100 if (!fetched_pkts) { 2090 if (!fetched_pkts) {
2101
2102 packet = list_first_entry(rx_pktq, struct htc_packet, 2091 packet = list_first_entry(rx_pktq, struct htc_packet,
2103 list); 2092 list);
2104 2093
@@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2173 look_aheads[0] = msg_look_ahead; 2162 look_aheads[0] = msg_look_ahead;
2174 2163
2175 while (true) { 2164 while (true) {
2176
2177 /* 2165 /*
2178 * First lookahead sets the expected endpoint IDs for all 2166 * First lookahead sets the expected endpoint IDs for all
2179 * packets in a bundle. 2167 * packets in a bundle.
@@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
2825 packet->buf = packet->buf_start; 2813 packet->buf = packet->buf_start;
2826 packet->endpoint = ENDPOINT_0; 2814 packet->endpoint = ENDPOINT_0;
2827 list_add_tail(&packet->list, &target->free_ctrl_rxbuf); 2815 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2828 } else 2816 } else {
2829 list_add_tail(&packet->list, &target->free_ctrl_txbuf); 2817 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2818 }
2830 } 2819 }
2831 2820
2832 return 0; 2821 return 0;
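
Most of the htc_mbox.c hunks above are checkpatch brace fixes: when any branch of an if/else needs braces, kernel coding style braces every branch so the chain stays visually symmetric. A compilable toy example of the rule (the endpoint wording here is illustrative):

    #include <stdio.h>

    static void classify(int svc_is_control)
    {
        if (svc_is_control) {
            /* Single statement, but braced because the else branch
             * is a multi-statement block. */
            printf("control endpoint: fixed credit share\n");
        } else {
            printf("data endpoint: proportional credit share\n");
            printf("(3/4 of the fair share, at least one message)\n");
        }
    }

    int main(void)
    {
        classify(1);
        classify(0);
        return 0;
    }
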
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 67aa924ed8b3..756fe52a12c8 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
137 credits_required = 0; 137 credits_required = 0;
138 138
139 } else { 139 } else {
140
141 if (ep->cred_dist.credits < credits_required) 140 if (ep->cred_dist.credits < credits_required)
142 break; 141 break;
143 142
@@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
169 /* queue this packet into the caller's queue */ 168 /* queue this packet into the caller's queue */
170 list_add_tail(&packet->list, queue); 169 list_add_tail(&packet->list, queue);
171 } 170 }
172
173} 171}
174 172
175static void get_htc_packet(struct htc_target *target, 173static void get_htc_packet(struct htc_target *target,
@@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
279 list_add(&packet->list, pkt_queue); 277 list_add(&packet->list, pkt_queue);
280 break; 278 break;
281 } 279 }
282
283 } 280 }
284 281
285 if (status != 0) { 282 if (status != 0) {
@@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
385 */ 382 */
386 list_for_each_entry_safe(packet, tmp_pkt, 383 list_for_each_entry_safe(packet, tmp_pkt,
387 txq, list) { 384 txq, list) {
388
389 ath6kl_dbg(ATH6KL_DBG_HTC, 385 ath6kl_dbg(ATH6KL_DBG_HTC,
390 "%s: Indicat overflowed TX pkts: %p\n", 386 "%s: Indicat overflowed TX pkts: %p\n",
391 __func__, packet); 387 __func__, packet);
@@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
403 list_move_tail(&packet->list, 399 list_move_tail(&packet->list,
404 &send_queue); 400 &send_queue);
405 } 401 }
406
407 } 402 }
408 403
409 if (list_empty(&send_queue)) { 404 if (list_empty(&send_queue)) {
@@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
454 * enough transmit resources. 449 * enough transmit resources.
455 */ 450 */
456 while (true) { 451 while (true) {
457
458 if (get_queue_depth(&ep->txq) == 0) 452 if (get_queue_depth(&ep->txq) == 0)
459 break; 453 break;
460 454
@@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
495 } 489 }
496 490
497 spin_lock_bh(&target->tx_lock); 491 spin_lock_bh(&target->tx_lock);
498
499 } 492 }
493
500 /* done with this endpoint, we can clear the count */ 494 /* done with this endpoint, we can clear the count */
501 ep->tx_proc_cnt = 0; 495 ep->tx_proc_cnt = 0;
502 spin_unlock_bh(&target->tx_lock); 496 spin_unlock_bh(&target->tx_lock);
@@ -1106,7 +1100,6 @@ free_skb:
1106 dev_kfree_skb(skb); 1100 dev_kfree_skb(skb);
1107 1101
1108 return status; 1102 return status;
1109
1110} 1103}
1111 1104
1112static void htc_flush_rx_queue(struct htc_target *target, 1105static void htc_flush_rx_queue(struct htc_target *target,
@@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
1258 tx_alloc = 0; 1251 tx_alloc = 0;
1259 1252
1260 } else { 1253 } else {
1261
1262 tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id); 1254 tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
1263 if (tx_alloc == 0) { 1255 if (tx_alloc == 0) {
1264 status = -ENOMEM; 1256 status = -ENOMEM;
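
The get_htc_packet_credit_based() hunk above sits inside a loop that pulls packets from an endpoint queue only while the endpoint still holds enough credits. A rough, self-contained model of that gate, with hypothetical types (pkt and send_while_credited are not the driver's names):

    #include <stdio.h>

    struct pkt { int credits_required; };

    static int send_while_credited(struct pkt *queue, int n, int credits)
    {
        int sent = 0;

        for (int i = 0; i < n; i++) {
            if (queue[i].credits_required > credits)
                break;            /* mirrors the 'break' in the hunk */
            credits -= queue[i].credits_required;
            sent++;
        }
        return sent;
    }

    int main(void)
    {
        struct pkt q[] = { {2}, {3}, {4} };

        /* 6 credits cover the first two packets (2 + 3) but not the third. */
        printf("sent %d of 3 packets with 6 credits\n",
               send_while_credited(q, 3, 6));
        return 0;
    }
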
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 4f316bdcbab5..d5ef211f261c 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
1192 1192
1193 if (board_ext_address && 1193 if (board_ext_address &&
1194 ar->fw_board_len == (board_data_size + board_ext_data_size)) { 1194 ar->fw_board_len == (board_data_size + board_ext_data_size)) {
1195
1196 /* write extended board data */ 1195 /* write extended board data */
1197 ath6kl_dbg(ATH6KL_DBG_BOOT, 1196 ath6kl_dbg(ATH6KL_DBG_BOOT,
1198 "writing extended board data to 0x%x (%d B)\n", 1197 "writing extended board data to 0x%x (%d B)\n",
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 5839fc23bdc7..d56554674da4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
571 571
572static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel) 572static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
573{ 573{
574
575 struct ath6kl *ar = vif->ar; 574 struct ath6kl *ar = vif->ar;
576 575
577 vif->profile.ch = cpu_to_le16(channel); 576 vif->profile.ch = cpu_to_le16(channel);
@@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
600 599
601static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel) 600static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
602{ 601{
603
604 struct ath6kl_vif *vif; 602 struct ath6kl_vif *vif;
605 int res = 0; 603 int res = 0;
606 604
@@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
692 cfg80211_michael_mic_failure(vif->ndev, sta->mac, 690 cfg80211_michael_mic_failure(vif->ndev, sta->mac,
693 NL80211_KEYTYPE_PAIRWISE, keyid, 691 NL80211_KEYTYPE_PAIRWISE, keyid,
694 tsc, GFP_KERNEL); 692 tsc, GFP_KERNEL);
695 } else 693 } else {
696 ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast); 694 ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
697 695 }
698} 696}
699 697
700static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len) 698static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
1093 if (test_bit(CONNECTED, &vif->flags)) { 1091 if (test_bit(CONNECTED, &vif->flags)) {
1094 netif_carrier_on(dev); 1092 netif_carrier_on(dev);
1095 netif_wake_queue(dev); 1093 netif_wake_queue(dev);
1096 } else 1094 } else {
1097 netif_carrier_off(dev); 1095 netif_carrier_off(dev);
1096 }
1098 1097
1099 return 0; 1098 return 0;
1100} 1099}
@@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
1146 dev->features = features | NETIF_F_RXCSUM; 1145 dev->features = features | NETIF_F_RXCSUM;
1147 return err; 1146 return err;
1148 } 1147 }
1149
1150 } 1148 }
1151 1149
1152 return err; 1150 return err;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 7126bdd4236c..339d89f14d32 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
348 int i, scat_req_sz, scat_list_sz, size; 348 int i, scat_req_sz, scat_list_sz, size;
349 u8 *virt_buf; 349 u8 *virt_buf;
350 350
351 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); 351 scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
352 scat_req_sz = sizeof(*s_req) + scat_list_sz; 352 scat_req_sz = sizeof(*s_req) + scat_list_sz;
353 353
354 if (!virt_scat) 354 if (!virt_scat)
@@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
425 memcpy(tbuf, buf, len); 425 memcpy(tbuf, buf, len);
426 426
427 bounced = true; 427 bounced = true;
428 } else 428 } else {
429 tbuf = buf; 429 tbuf = buf;
430 }
430 431
431 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); 432 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
432 if ((request & HIF_READ) && bounced) 433 if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
441static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, 442static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
442 struct bus_request *req) 443 struct bus_request *req)
443{ 444{
444 if (req->scat_req) 445 if (req->scat_req) {
445 ath6kl_sdio_scat_rw(ar_sdio, req); 446 ath6kl_sdio_scat_rw(ar_sdio, req);
446 else { 447 } else {
447 void *context; 448 void *context;
448 int status; 449 int status;
449 450
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
656 list_add_tail(&s_req->list, &ar_sdio->scat_req); 657 list_add_tail(&s_req->list, &ar_sdio->scat_req);
657 658
658 spin_unlock_bh(&ar_sdio->scat_lock); 659 spin_unlock_bh(&ar_sdio->scat_lock);
659
660} 660}
661 661
662/* scatter gather read write request */ 662/* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
674 "hif-scatter: total len: %d scatter entries: %d\n", 674 "hif-scatter: total len: %d scatter entries: %d\n",
675 scat_req->len, scat_req->scat_entries); 675 scat_req->len, scat_req->scat_entries);
676 676
677 if (request & HIF_SYNCHRONOUS) 677 if (request & HIF_SYNCHRONOUS) {
678 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); 678 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
679 else { 679 } else {
680 spin_lock_bh(&ar_sdio->wr_async_lock); 680 spin_lock_bh(&ar_sdio->wr_async_lock);
681 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); 681 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
682 spin_unlock_bh(&ar_sdio->wr_async_lock); 682 spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
856 856
857 if (ar->suspend_mode == WLAN_POWER_STATE_WOW || 857 if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
858 (!ar->suspend_mode && wow)) { 858 (!ar->suspend_mode && wow)) {
859
860 ret = ath6kl_set_sdio_pm_caps(ar); 859 ret = ath6kl_set_sdio_pm_caps(ar);
861 if (ret) 860 if (ret)
862 goto cut_pwr; 861 goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
878 877
879 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || 878 if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
880 !ar->suspend_mode || try_deepsleep) { 879 !ar->suspend_mode || try_deepsleep) {
881
882 flags = sdio_get_host_pm_caps(func); 880 flags = sdio_get_host_pm_caps(func);
883 if (!(flags & MMC_PM_KEEP_POWER)) 881 if (!(flags & MMC_PM_KEEP_POWER))
884 goto cut_pwr; 882 goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
1061 1059
1062 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); 1060 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
1063 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { 1061 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
1064
1065 /* 1062 /*
1066 * Hit the credit counter with a 4-byte access, the first byte 1063 * Hit the credit counter with a 4-byte access, the first byte
1067 * read will hit the counter and cause a decrement, while the 1064 * read will hit the counter and cause a decrement, while the
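
One of the sdio.c hunks above touches the driver's bounce-buffer path: a caller's buffer that is not DMA-safe is staged through a private buffer and copied back after a read. A hedged userspace sketch of the pattern, using a trivial alignment test and a stubbed do_io() in place of the real SDIO transfer:

    #include <string.h>

    static int do_io(int is_read, unsigned char *buf, size_t len)
    {
        if (is_read)
            memset(buf, 0xab, len);  /* pretend the device filled it */
        return 0;
    }

    static int read_write_sync(int is_read, unsigned char *buf, size_t len,
                               unsigned char *dma_buf, size_t dma_len)
    {
        unsigned char *tbuf;
        int bounced = 0;
        int ret;

        if (((unsigned long)buf & 3) && len <= dma_len) {
            tbuf = dma_buf;          /* stage through the safe buffer */
            if (!is_read)
                memcpy(tbuf, buf, len);
            bounced = 1;
        } else {
            tbuf = buf;              /* buffer usable as-is */
        }

        ret = do_io(is_read, tbuf, len);
        if (is_read && bounced)
            memcpy(buf, tbuf, len);  /* copy results back to the caller */

        return ret;
    }

    int main(void)
    {
        unsigned char dma_buf[64];
        unsigned char raw[16];

        /* raw + 1 is deliberately misaligned to force the bounce path. */
        return read_write_sync(1, raw + 1, 8, dma_buf, sizeof(dma_buf));
    }
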
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a580a629a0da..d5eeeae7711b 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -289,7 +289,7 @@ struct host_interest {
289 u32 hi_hp_rx_traffic_ratio; /* 0xd8 */ 289 u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
290 290
291 /* test applications flags */ 291 /* test applications flags */
292 u32 hi_test_apps_related ; /* 0xdc */ 292 u32 hi_test_apps_related; /* 0xdc */
293 /* location of test script */ 293 /* location of test script */
294 u32 hi_ota_testscript; /* 0xe0 */ 294 u32 hi_ota_testscript; /* 0xe0 */
295 /* location of CAL data */ 295 /* location of CAL data */
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index ebb24045a8ae..40432fe7a5d2 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
125 *flags |= WMI_DATA_HDR_FLAGS_UAPSD; 125 *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
126 spin_unlock_bh(&conn->psq_lock); 126 spin_unlock_bh(&conn->psq_lock);
127 return false; 127 return false;
128 } else if (!conn->apsd_info) 128 } else if (!conn->apsd_info) {
129 return false; 129 return false;
130 }
130 131
131 if (test_bit(WMM_ENABLED, &vif->flags)) { 132 if (test_bit(WMM_ENABLED, &vif->flags)) {
132 ether_type = be16_to_cpu(datap->h_proto); 133 ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
316 cookie = NULL; 317 cookie = NULL;
317 ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n", 318 ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
318 skb, skb->len); 319 skb, skb->len);
319 } else 320 } else {
320 cookie = ath6kl_alloc_cookie(ar); 321 cookie = ath6kl_alloc_cookie(ar);
322 }
321 323
322 if (cookie == NULL) { 324 if (cookie == NULL) {
323 spin_unlock_bh(&ar->lock); 325 spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
359 struct ath6kl_vif *vif = netdev_priv(dev); 361 struct ath6kl_vif *vif = netdev_priv(dev);
360 u32 map_no = 0; 362 u32 map_no = 0;
361 u16 htc_tag = ATH6KL_DATA_PKT_TAG; 363 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
362 u8 ac = 99 ; /* initialize to unmapped ac */ 364 u8 ac = 99; /* initialize to unmapped ac */
363 bool chk_adhoc_ps_mapping = false; 365 bool chk_adhoc_ps_mapping = false;
364 int ret; 366 int ret;
365 struct wmi_tx_meta_v2 meta_v2; 367 struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
449 if (ret) 451 if (ret)
450 goto fail_tx; 452 goto fail_tx;
451 } 453 }
452 } else 454 } else {
453 goto fail_tx; 455 goto fail_tx;
456 }
454 457
455 spin_lock_bh(&ar->lock); 458 spin_lock_bh(&ar->lock);
456 459
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
702 705
703 /* reap completed packets */ 706 /* reap completed packets */
704 while (!list_empty(packet_queue)) { 707 while (!list_empty(packet_queue)) {
705
706 packet = list_first_entry(packet_queue, struct htc_packet, 708 packet = list_first_entry(packet_queue, struct htc_packet,
707 list); 709 list);
708 list_del(&packet->list); 710 list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1089 else 1091 else
1090 skb_queue_tail(&rxtid->q, node->skb); 1092 skb_queue_tail(&rxtid->q, node->skb);
1091 node->skb = NULL; 1093 node->skb = NULL;
1092 } else 1094 } else {
1093 stats->num_hole++; 1095 stats->num_hole++;
1096 }
1094 1097
1095 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next); 1098 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
1096 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz); 1099 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1211 return is_queued; 1214 return is_queued;
1212 1215
1213 spin_lock_bh(&rxtid->lock); 1216 spin_lock_bh(&rxtid->lock);
1214 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) { 1217 for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
1215 if (rxtid->hold_q[idx].skb) { 1218 if (rxtid->hold_q[idx].skb) {
1216 /* 1219 /*
1217 * There is a frame in the queue and no 1220 * There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
1265 is_apsdq_empty_at_start = is_apsdq_empty; 1268 is_apsdq_empty_at_start = is_apsdq_empty;
1266 1269
1267 while ((!is_apsdq_empty) && (num_frames_to_deliver)) { 1270 while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
1268
1269 spin_lock_bh(&conn->psq_lock); 1271 spin_lock_bh(&conn->psq_lock);
1270 skb = skb_dequeue(&conn->apsdq); 1272 skb = skb_dequeue(&conn->apsdq);
1271 is_apsdq_empty = skb_queue_empty(&conn->apsdq); 1273 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1606 if (!conn) 1608 if (!conn)
1607 return; 1609 return;
1608 aggr_conn = conn->aggr_conn; 1610 aggr_conn = conn->aggr_conn;
1609 } else 1611 } else {
1610 aggr_conn = vif->aggr_cntxt->aggr_conn; 1612 aggr_conn = vif->aggr_cntxt->aggr_conn;
1613 }
1611 1614
1612 if (aggr_process_recv_frm(aggr_conn, tid, seq_no, 1615 if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
1613 is_amsdu, skb)) { 1616 is_amsdu, skb)) {
1614 /* aggregation code will handle the skb */ 1617 /* aggregation code will handle the skb */
1615 return; 1618 return;
1616 } 1619 }
1617 } else if (!is_broadcast_ether_addr(datap->h_dest)) 1620 } else if (!is_broadcast_ether_addr(datap->h_dest)) {
1618 vif->net_stats.multicast++; 1621 vif->net_stats.multicast++;
1622 }
1619 1623
1620 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); 1624 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1621} 1625}
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
1710 sta = ath6kl_find_sta_by_aid(vif->ar, aid); 1714 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1711 if (sta) 1715 if (sta)
1712 aggr_conn = sta->aggr_conn; 1716 aggr_conn = sta->aggr_conn;
1713 } else 1717 } else {
1714 aggr_conn = vif->aggr_cntxt->aggr_conn; 1718 aggr_conn = vif->aggr_cntxt->aggr_conn;
1719 }
1715 1720
1716 if (!aggr_conn) 1721 if (!aggr_conn)
1717 return; 1722 return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1766 skb_queue_head_init(&rxtid->q); 1771 skb_queue_head_init(&rxtid->q);
1767 spin_lock_init(&rxtid->lock); 1772 spin_lock_init(&rxtid->lock);
1768 } 1773 }
1769
1770} 1774}
1771 1775
1772struct aggr_info *aggr_init(struct ath6kl_vif *vif) 1776struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
1806 sta = ath6kl_find_sta_by_aid(vif->ar, aid); 1810 sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1807 if (sta) 1811 if (sta)
1808 aggr_conn = sta->aggr_conn; 1812 aggr_conn = sta->aggr_conn;
1809 } else 1813 } else {
1810 aggr_conn = vif->aggr_cntxt->aggr_conn; 1814 aggr_conn = vif->aggr_cntxt->aggr_conn;
1815 }
1811 1816
1812 if (!aggr_conn) 1817 if (!aggr_conn)
1813 return; 1818 return;
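
aggr_deque_frms(), touched above, walks the receive hold queue in sequence order, delivering slots that contain a frame and counting a hole otherwise. The following toy model mirrors that walk; the window size, sequence modulus, and queue contents are made up for illustration:

    #include <stdio.h>

    #define WIN_SZ   8
    #define SEQ_MOD  64

    int main(void)
    {
        int hold_q[WIN_SZ] = { 1, 1, 0, 1, 1, 1, 0, 1 };  /* 1 = frame queued */
        int seq_next = 60, holes = 0, delivered = 0;

        for (int idx = 0; idx < WIN_SZ; idx++) {
            if (hold_q[idx])
                delivered++;  /* skb_queue_tail(&rxtid->q, ...) in the driver */
            else
                holes++;      /* stats->num_hole++ in the hunk above */
            seq_next = (seq_next + 1) % SEQ_MOD;  /* ATH6KL_NEXT_SEQ_NO() */
        }
        printf("delivered %d, holes %d, seq_next now %d\n",
               delivered, holes, seq_next);
        return 0;
    }
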
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 56c3fd5cef65..3afc5a463d06 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
236 break; 236 break;
237 kfree(urb_context); 237 kfree(urb_context);
238 } 238 }
239
240} 239}
241 240
242static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb) 241static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
245 244
246 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) 245 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
247 ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]); 246 ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
248
249} 247}
250 248
251static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb, 249static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 8b4ce28e3ce8..4d7f9e4712e9 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
289 ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) + 289 ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
290 sizeof(struct ath6kl_llc_snap_hdr), 290 sizeof(struct ath6kl_llc_snap_hdr),
291 layer2_priority); 291 layer2_priority);
292 } else 292 } else {
293 usr_pri = layer2_priority & 0x7; 293 usr_pri = layer2_priority & 0x7;
294 }
294 295
295 /* 296 /*
296 * Queue the EAPOL frames in the same WMM_AC_VO queue 297 * Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
359 hdr_size = roundup(sizeof(struct ieee80211_qos_hdr), 360 hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
360 sizeof(u32)); 361 sizeof(u32));
361 skb_pull(skb, hdr_size); 362 skb_pull(skb, hdr_size);
362 } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) 363 } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
363 skb_pull(skb, sizeof(struct ieee80211_hdr_3addr)); 364 skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
365 }
364 366
365 datap = skb->data; 367 datap = skb->data;
366 llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap); 368 llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
936 938
937static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len) 939static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
938{ 940{
939
940 struct ath6kl_wmi_regdomain *ev; 941 struct ath6kl_wmi_regdomain *ev;
941 struct country_code_to_enum_rd *country = NULL; 942 struct country_code_to_enum_rd *country = NULL;
942 struct reg_dmn_pair_mapping *regpair = NULL; 943 struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
946 ev = (struct ath6kl_wmi_regdomain *) datap; 947 ev = (struct ath6kl_wmi_regdomain *) datap;
947 reg_code = le32_to_cpu(ev->reg_code); 948 reg_code = le32_to_cpu(ev->reg_code);
948 949
949 if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) 950 if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
950 country = ath6kl_regd_find_country((u16) reg_code); 951 country = ath6kl_regd_find_country((u16) reg_code);
951 else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) { 952 } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
952
953 regpair = ath6kl_get_regpair((u16) reg_code); 953 regpair = ath6kl_get_regpair((u16) reg_code);
954 country = ath6kl_regd_find_country_by_rd((u16) reg_code); 954 country = ath6kl_regd_find_country_by_rd((u16) reg_code);
955 if (regpair) 955 if (regpair)
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
1499 1499
1500 if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) && 1500 if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
1501 (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) { 1501 (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
1502
1503 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion); 1502 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
1504 tsinfo = le16_to_cpu(ts->tsinfo); 1503 tsinfo = le16_to_cpu(ts->tsinfo);
1505 tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & 1504 tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
1530 * for delete qos stream from AP 1529 * for delete qos stream from AP
1531 */ 1530 */
1532 else if (reply->cac_indication == CAC_INDICATION_DELETE) { 1531 else if (reply->cac_indication == CAC_INDICATION_DELETE) {
1533
1534 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion); 1532 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
1535 tsinfo = le16_to_cpu(ts->tsinfo); 1533 tsinfo = le16_to_cpu(ts->tsinfo);
1536 ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & 1534 ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2322,7 +2320,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
2322 return ret; 2320 return ret;
2323} 2321}
2324 2322
2325int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk) 2323int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk)
2326{ 2324{
2327 struct sk_buff *skb; 2325 struct sk_buff *skb;
2328 struct wmi_add_krk_cmd *cmd; 2326 struct wmi_add_krk_cmd *cmd;
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2479 goto free_data_skb; 2477 goto free_data_skb;
2480 2478
2481 for (index = 0; index < num_pri_streams; index++) { 2479 for (index = 0; index < num_pri_streams; index++) {
2482
2483 if (WARN_ON(!data_sync_bufs[index].skb)) 2480 if (WARN_ON(!data_sync_bufs[index].skb))
2484 goto free_data_skb; 2481 goto free_data_skb;
2485 2482
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
2704 2701
2705 for (i = 0; i < WMM_NUM_AC; i++) { 2702 for (i = 0; i < WMM_NUM_AC; i++) {
2706 if (stream_exist & (1 << i)) { 2703 if (stream_exist & (1 << i)) {
2707
2708 /* 2704 /*
2709 * FIXME: Is this lock & unlock inside 2705 * FIXME: Is this lock & unlock inside
2710 * for loop correct? may need rework. 2706 * for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2870 if (host_mode == ATH6KL_HOST_MODE_ASLEEP) { 2866 if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
2871 ath6kl_wmi_relinquish_implicit_pstream_credits(wmi); 2867 ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
2872 cmd->asleep = cpu_to_le32(1); 2868 cmd->asleep = cpu_to_le32(1);
2873 } else 2869 } else {
2874 cmd->awake = cpu_to_le32(1); 2870 cmd->awake = cpu_to_le32(1);
2871 }
2875 2872
2876 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, 2873 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2877 WMI_SET_HOST_SLEEP_MODE_CMDID, 2874 WMI_SET_HOST_SLEEP_MODE_CMDID,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 5c702ae4d9f8..bb23fc00111d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
898 * flags here 898 * flags here
899 */ 899 */
900enum wmi_scan_ctrl_flags_bits { 900enum wmi_scan_ctrl_flags_bits {
901
902 /* set if can scan in the connect cmd */ 901 /* set if can scan in the connect cmd */
903 CONNECT_SCAN_CTRL_FLAGS = 0x01, 902 CONNECT_SCAN_CTRL_FLAGS = 0x01,
904 903
@@ -2617,7 +2616,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
2617 u8 *key_material, 2616 u8 *key_material,
2618 u8 key_op_ctrl, u8 *mac_addr, 2617 u8 key_op_ctrl, u8 *mac_addr,
2619 enum wmi_sync_flag sync_flag); 2618 enum wmi_sync_flag sync_flag);
2620int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk); 2619int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk);
2621int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index); 2620int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
2622int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid, 2621int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
2623 const u8 *pmkid, bool set); 2622 const u8 *pmkid, bool set);
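
The only functional change in the wmi.c/wmi.h pair is constifying the krk argument: the key material is presumably just copied into the outgoing command, so the prototype can promise not to modify it and callers holding const key buffers no longer need a cast. A small model of the change (u8, add_krk_cmd, and the 16-byte length are stand-ins):

    #include <string.h>

    typedef unsigned char u8;

    struct add_krk_cmd { u8 krk[16]; };

    static int add_krk(struct add_krk_cmd *cmd, const u8 *krk)
    {
        memcpy(cmd->krk, krk, sizeof(cmd->krk));  /* read-only use of krk */
        return 0;
    }

    int main(void)
    {
        static const u8 key[16] = { 0 };  /* const source passes cleanly */
        struct add_krk_cmd cmd;

        return add_krk(&cmd, key);
    }
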
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 8e1c7b0fe76c..8fcd586d1c39 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -53,7 +53,8 @@ obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
53obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o 53obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
54ath9k_common-y:= common.o \ 54ath9k_common-y:= common.o \
55 common-init.o \ 55 common-init.o \
56 common-beacon.o 56 common-beacon.o \
57 common-debug.o
57 58
58ath9k_htc-y += htc_hst.o \ 59ath9k_htc-y += htc_hst.o \
59 hif_usb.o \ 60 hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 0a6163e9248c..c38399bc9aa9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
410 {0x00009e30, 0x06336f77}, 410 {0x00009e30, 0x06336f77},
411 {0x00009e34, 0x6af6532f}, 411 {0x00009e34, 0x6af6532f},
412 {0x00009e38, 0x0cc80c00}, 412 {0x00009e38, 0x0cc80c00},
413 {0x00009e40, 0x0d261820}, 413 {0x00009e40, 0x0d261800},
414 {0x00009e4c, 0x00001004}, 414 {0x00009e4c, 0x00001004},
415 {0x00009e50, 0x00ff03f1}, 415 {0x00009e50, 0x00ff03f1},
416 {0x00009e54, 0x00000000}, 416 {0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f76139bbb74f..2c42ff05efa3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
592 {0x00009e30, 0x06336f77}, 592 {0x00009e30, 0x06336f77},
593 {0x00009e34, 0x6af6532f}, 593 {0x00009e34, 0x6af6532f},
594 {0x00009e38, 0x0cc80c00}, 594 {0x00009e38, 0x0cc80c00},
595 {0x00009e40, 0x0d261820}, 595 {0x00009e40, 0x0d261800},
596 {0x00009e4c, 0x00001004}, 596 {0x00009e4c, 0x00001004},
597 {0x00009e50, 0x00ff03f1}, 597 {0x00009e50, 0x00ff03f1},
598 {0x00009fc0, 0x803e4788}, 598 {0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0ac8be96097f..2154efcd3900 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
231 {0x00009e30, 0x06336f77}, 231 {0x00009e30, 0x06336f77},
232 {0x00009e34, 0x6af6532f}, 232 {0x00009e34, 0x6af6532f},
233 {0x00009e38, 0x0cc80c00}, 233 {0x00009e38, 0x0cc80c00},
234 {0x00009e40, 0x0d261820}, 234 {0x00009e40, 0x0d261800},
235 {0x00009e4c, 0x00001004}, 235 {0x00009e4c, 0x00001004},
236 {0x00009e50, 0x00ff03f1}, 236 {0x00009e50, 0x00ff03f1},
237 {0x00009fc0, 0x803e4788}, 237 {0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index a01f0edb6518..b995ffe88b33 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
318 {0x00009e30, 0x06336f77}, 318 {0x00009e30, 0x06336f77},
319 {0x00009e34, 0x6af6532f}, 319 {0x00009e34, 0x6af6532f},
320 {0x00009e38, 0x0cc80c00}, 320 {0x00009e38, 0x0cc80c00},
321 {0x00009e40, 0x0d261820}, 321 {0x00009e40, 0x0d261800},
322 {0x00009e4c, 0x00001004}, 322 {0x00009e4c, 0x00001004},
323 {0x00009e50, 0x00ff03f1}, 323 {0x00009e50, 0x00ff03f1},
324 {0x00009e54, 0x00000000}, 324 {0x00009e54, 0x00000000},
@@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
348 {0x0000a370, 0x00000000}, 348 {0x0000a370, 0x00000000},
349 {0x0000a390, 0x00000001}, 349 {0x0000a390, 0x00000001},
350 {0x0000a394, 0x00000444}, 350 {0x0000a394, 0x00000444},
351 {0x0000a398, 0x00000000}, 351 {0x0000a398, 0x001f0e0f},
352 {0x0000a39c, 0x210d0401}, 352 {0x0000a39c, 0x0075393f},
353 {0x0000a3a0, 0xab9a7144}, 353 {0x0000a3a0, 0xb79f6427},
354 {0x0000a3a4, 0x00000000}, 354 {0x0000a3a4, 0x00000000},
355 {0x0000a3a8, 0xaaaaaaaa}, 355 {0x0000a3a8, 0xaaaaaaaa},
356 {0x0000a3ac, 0x3c466478}, 356 {0x0000a3ac, 0x3c466478},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 3c9113d9b1bc..8e5c3b9786e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
257 {0x0000a370, 0x00000000}, 257 {0x0000a370, 0x00000000},
258 {0x0000a390, 0x00000001}, 258 {0x0000a390, 0x00000001},
259 {0x0000a394, 0x00000444}, 259 {0x0000a394, 0x00000444},
260 {0x0000a398, 0x1f020503}, 260 {0x0000a398, 0x001f0e0f},
261 {0x0000a39c, 0x29180c03}, 261 {0x0000a39c, 0x0075393f},
262 {0x0000a3a0, 0x9a8b6844}, 262 {0x0000a3a0, 0xb79f6427},
263 {0x0000a3a4, 0x000000ff}, 263 {0x0000a3a4, 0x000000ff},
264 {0x0000a3a8, 0x6a6a6a6a}, 264 {0x0000a3a8, 0x6a6a6a6a},
265 {0x0000a3ac, 0x6a6a6a6a}, 265 {0x0000a3ac, 0x6a6a6a6a},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index e6aec2c0207f..a5ca65240af3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
90 {0x00009e30, 0x06336f77}, 90 {0x00009e30, 0x06336f77},
91 {0x00009e34, 0x6af6532f}, 91 {0x00009e34, 0x6af6532f},
92 {0x00009e38, 0x0cc80c00}, 92 {0x00009e38, 0x0cc80c00},
93 {0x00009e40, 0x0d261820}, 93 {0x00009e40, 0x0d261800},
94 {0x00009e4c, 0x00001004}, 94 {0x00009e4c, 0x00001004},
95 {0x00009e50, 0x00ff03f1}, 95 {0x00009e50, 0x00ff03f1},
96 {0x00009e54, 0x00000000}, 96 {0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3ba03dde4215..2ca8f7e06174 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -23,8 +23,8 @@
23#include <linux/leds.h> 23#include <linux/leds.h>
24#include <linux/completion.h> 24#include <linux/completion.h>
25 25
26#include "debug.h"
27#include "common.h" 26#include "common.h"
27#include "debug.h"
28#include "mci.h" 28#include "mci.h"
29#include "dfs.h" 29#include "dfs.h"
30#include "spectral.h" 30#include "spectral.h"
@@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
114#define ATH_TXFIFO_DEPTH 8 114#define ATH_TXFIFO_DEPTH 8
115#define ATH_TX_ERROR 0x01 115#define ATH_TX_ERROR 0x01
116 116
117/* Stop tx traffic 1ms before the GO goes away */
118#define ATH_P2P_PS_STOP_TIME 1000
119
117#define IEEE80211_SEQ_SEQ_SHIFT 4 120#define IEEE80211_SEQ_SEQ_SHIFT 4
118#define IEEE80211_SEQ_MAX 4096 121#define IEEE80211_SEQ_MAX 4096
119#define IEEE80211_WEP_IVLEN 3 122#define IEEE80211_WEP_IVLEN 3
@@ -271,6 +274,7 @@ struct ath_node {
271#ifdef CONFIG_ATH9K_STATION_STATISTICS 274#ifdef CONFIG_ATH9K_STATION_STATISTICS
272 struct ath_rx_rate_stats rx_rate_stats; 275 struct ath_rx_rate_stats rx_rate_stats;
273#endif 276#endif
277 u8 key_idx[4];
274}; 278};
275 279
276struct ath_tx_control { 280struct ath_tx_control {
@@ -366,11 +370,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
366/********/ 370/********/
367 371
368struct ath_vif { 372struct ath_vif {
373 struct ieee80211_vif *vif;
369 struct ath_node mcast_node; 374 struct ath_node mcast_node;
370 int av_bslot; 375 int av_bslot;
371 bool primary_sta_vif; 376 bool primary_sta_vif;
372 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ 377 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
373 struct ath_buf *av_bcbuf; 378 struct ath_buf *av_bcbuf;
379
380 /* P2P Client */
381 struct ieee80211_noa_data noa;
374}; 382};
375 383
376struct ath9k_vif_iter_data { 384struct ath9k_vif_iter_data {
@@ -463,6 +471,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
463void ath_update_survey_nf(struct ath_softc *sc, int channel); 471void ath_update_survey_nf(struct ath_softc *sc, int channel);
464void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type); 472void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
465void ath_ps_full_sleep(unsigned long data); 473void ath_ps_full_sleep(unsigned long data);
474void ath9k_p2p_ps_timer(void *priv);
475void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
466 476
467/**********/ 477/**********/
468/* BTCOEX */ 478/* BTCOEX */
@@ -713,6 +723,9 @@ struct ath_softc {
713 struct completion paprd_complete; 723 struct completion paprd_complete;
714 wait_queue_head_t tx_wait; 724 wait_queue_head_t tx_wait;
715 725
726 struct ath_gen_timer *p2p_ps_timer;
727 struct ath_vif *p2p_ps_vif;
728
716 unsigned long driver_data; 729 unsigned long driver_data;
717 730
718 u8 gtt_cnt; 731 u8 gtt_cnt;
@@ -757,6 +770,7 @@ struct ath_softc {
757 struct ath_ant_comb ant_comb; 770 struct ath_ant_comb ant_comb;
758 u8 ant_tx, ant_rx; 771 u8 ant_tx, ant_rx;
759 struct dfs_pattern_detector *dfs_detector; 772 struct dfs_pattern_detector *dfs_detector;
773 u64 dfs_prev_pulse_ts;
760 u32 wow_enabled; 774 u32 wow_enabled;
761 /* relay(fs) channel for spectral scan */ 775 /* relay(fs) channel for spectral scan */
762 struct rchan *rfs_chan_spec_scan; 776 struct rchan *rfs_chan_spec_scan;
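
The ath9k.h additions above wire up P2P client powersave: a per-vif NoA descriptor, a generic hardware timer (p2p_ps_timer), and ATH_P2P_PS_STOP_TIME to halt traffic 1 ms before the GO's absence window, per the new comment. A speculative sketch of the deadline arithmetic only; how the driver actually programs the timer is not shown in this header hunk:

    #include <stdio.h>

    #define ATH_P2P_PS_STOP_TIME 1000  /* microseconds, from the header above */

    /* Hypothetical helper: given the TSF time at which the GO's NoA absence
     * begins, compute when to stop tx traffic. */
    static unsigned long long next_stop_tsf(unsigned long long noa_start_us)
    {
        return noa_start_us - ATH_P2P_PS_STOP_TIME;
    }

    int main(void)
    {
        unsigned long long noa_start = 5000000;  /* absence begins at 5 s TSF */

        printf("stop tx at %llu us (1 ms early)\n", next_stop_tsf(noa_start));
        return 0;
    }
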
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index bd9e634879e6..e387f0b2954a 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -537,8 +537,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
537 cur_conf->dtim_period = bss_conf->dtim_period; 537 cur_conf->dtim_period = bss_conf->dtim_period;
538 cur_conf->dtim_count = 1; 538 cur_conf->dtim_count = 1;
539 cur_conf->ibss_creator = bss_conf->ibss_creator; 539 cur_conf->ibss_creator = bss_conf->ibss_creator;
540 cur_conf->bmiss_timeout =
541 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
542 540
543 /* 541 /*
544 * It looks like mac80211 may end up using beacon interval of zero in 542 * It looks like mac80211 may end up using beacon interval of zero in
@@ -549,6 +547,9 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
549 if (cur_conf->beacon_interval == 0) 547 if (cur_conf->beacon_interval == 0)
550 cur_conf->beacon_interval = 100; 548 cur_conf->beacon_interval = 100;
551 549
550 cur_conf->bmiss_timeout =
551 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
552
552 /* 553 /*
553 * We don't parse dtim period from mac80211 during the driver 554 * We don't parse dtim period from mac80211 during the driver
554 * initialization as it breaks association with hidden-ssid 555 * initialization as it breaks association with hidden-ssid
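
The beacon.c hunk above fixes an ordering bug: bmiss_timeout used to be derived from beacon_interval before the zero-interval fallback ran, so an interval of 0 from mac80211 produced a zero beacon-miss timeout. A compilable model of the corrected order (ATH_DEFAULT_BMISS_LIMIT is assumed to be 10 here):

    #include <stdio.h>

    #define ATH_DEFAULT_BMISS_LIMIT 10  /* assumed value, for illustration */

    int main(void)
    {
        unsigned int beacon_interval = 0;  /* as mac80211 may report it */
        unsigned int bmiss_timeout;

        /* Sanitize the interval first, as the patch now does... */
        if (beacon_interval == 0)
            beacon_interval = 100;

        /* ...then derive the miss timeout, which can no longer be zero. */
        bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * beacon_interval;
        printf("bmiss_timeout = %u TU\n", bmiss_timeout);
        return 0;
    }
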
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
new file mode 100644
index 000000000000..3b289f933405
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "common.h"
18
19static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
20 size_t count, loff_t *ppos)
21{
22 struct ath_hw *ah = file->private_data;
23 u32 len = 0, size = 6000;
24 char *buf;
25 size_t retval;
26
27 buf = kzalloc(size, GFP_KERNEL);
28 if (buf == NULL)
29 return -ENOMEM;
30
31 len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
32
33 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
34 kfree(buf);
35
36 return retval;
37}
38
39static const struct file_operations fops_modal_eeprom = {
40 .read = read_file_modal_eeprom,
41 .open = simple_open,
42 .owner = THIS_MODULE,
43 .llseek = default_llseek,
44};
45
46
47void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
48 struct ath_hw *ah)
49{
50 debugfs_create_file("modal_eeprom", S_IRUSR, debugfs_phy, ah,
51 &fops_modal_eeprom);
52}
53EXPORT_SYMBOL(ath9k_cmn_debug_modal_eeprom);
54
55static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 struct ath_hw *ah = file->private_data;
59 u32 len = 0, size = 1500;
60 ssize_t retval = 0;
61 char *buf;
62
63 buf = kzalloc(size, GFP_KERNEL);
64 if (!buf)
65 return -ENOMEM;
66
67 len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
68
69 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
70 kfree(buf);
71
72 return retval;
73}
74
75static const struct file_operations fops_base_eeprom = {
76 .read = read_file_base_eeprom,
77 .open = simple_open,
78 .owner = THIS_MODULE,
79 .llseek = default_llseek,
80};
81
82void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
83 struct ath_hw *ah)
84{
85 debugfs_create_file("base_eeprom", S_IRUSR, debugfs_phy, ah,
86 &fops_base_eeprom);
87}
88EXPORT_SYMBOL(ath9k_cmn_debug_base_eeprom);
89
90void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
91 struct ath_rx_status *rs)
92{
93#define RX_PHY_ERR_INC(c) rxstats->phy_err_stats[c]++
94#define RX_CMN_STAT_INC(c) (rxstats->c++)
95
96 RX_CMN_STAT_INC(rx_pkts_all);
97 rxstats->rx_bytes_all += rs->rs_datalen;
98
99 if (rs->rs_status & ATH9K_RXERR_CRC)
100 RX_CMN_STAT_INC(crc_err);
101 if (rs->rs_status & ATH9K_RXERR_DECRYPT)
102 RX_CMN_STAT_INC(decrypt_crc_err);
103 if (rs->rs_status & ATH9K_RXERR_MIC)
104 RX_CMN_STAT_INC(mic_err);
105 if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
106 RX_CMN_STAT_INC(pre_delim_crc_err);
107 if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
108 RX_CMN_STAT_INC(post_delim_crc_err);
109 if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
110 RX_CMN_STAT_INC(decrypt_busy_err);
111
112 if (rs->rs_status & ATH9K_RXERR_PHY) {
113 RX_CMN_STAT_INC(phy_err);
114 if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
115 RX_PHY_ERR_INC(rs->rs_phyerr);
116 }
117
118#undef RX_CMN_STAT_INC
119#undef RX_PHY_ERR_INC
120}
121EXPORT_SYMBOL(ath9k_cmn_debug_stat_rx);
122
123static ssize_t read_file_recv(struct file *file, char __user *user_buf,
124 size_t count, loff_t *ppos)
125{
126#define RXS_ERR(s, e) \
127 do { \
128 len += scnprintf(buf + len, size - len, \
129 "%18s : %10u\n", s, \
130 rxstats->e); \
131 } while (0)
132
133 struct ath_rx_stats *rxstats = file->private_data;
134 char *buf;
135 unsigned int len = 0, size = 1600;
136 ssize_t retval = 0;
137
138 buf = kzalloc(size, GFP_KERNEL);
139 if (buf == NULL)
140 return -ENOMEM;
141
142 RXS_ERR("PKTS-ALL", rx_pkts_all);
143 RXS_ERR("BYTES-ALL", rx_bytes_all);
144 RXS_ERR("BEACONS", rx_beacons);
145 RXS_ERR("FRAGS", rx_frags);
146 RXS_ERR("SPECTRAL", rx_spectral);
147
148 RXS_ERR("CRC ERR", crc_err);
149 RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
150 RXS_ERR("PHY ERR", phy_err);
151 RXS_ERR("MIC ERR", mic_err);
152 RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
153 RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
154 RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
155 RXS_ERR("LENGTH-ERR", rx_len_err);
156 RXS_ERR("OOM-ERR", rx_oom_err);
157 RXS_ERR("RATE-ERR", rx_rate_err);
158 RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
159
160 if (len > size)
161 len = size;
162
163 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
164 kfree(buf);
165
166 return retval;
167
168#undef RXS_ERR
169}
170
171static const struct file_operations fops_recv = {
172 .read = read_file_recv,
173 .open = simple_open,
174 .owner = THIS_MODULE,
175 .llseek = default_llseek,
176};
177
178void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
179 struct ath_rx_stats *rxstats)
180{
181 debugfs_create_file("recv", S_IRUSR, debugfs_phy, rxstats,
182 &fops_recv);
183}
184EXPORT_SYMBOL(ath9k_cmn_debug_recv);
185
186static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
187 size_t count, loff_t *ppos)
188{
189#define PHY_ERR(s, p) \
190 len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
191 rxstats->phy_err_stats[p]);
192
193 struct ath_rx_stats *rxstats = file->private_data;
194 char *buf;
195 unsigned int len = 0, size = 1600;
196 ssize_t retval = 0;
197
198 buf = kzalloc(size, GFP_KERNEL);
199 if (buf == NULL)
200 return -ENOMEM;
201
202 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
203 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
204 PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
205 PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
206 PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
207 PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
208 PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
209 PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
210 PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
211 PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
212 PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
213 PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
214 PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
215 PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
216 PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
217 PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
218 PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
219 PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
220 PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
221 PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
222 PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
223 PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
224 PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
225 PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
226 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
227 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
228
229 if (len > size)
230 len = size;
231
232 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
233 kfree(buf);
234
235 return retval;
236
237#undef PHY_ERR
238}
239
240static const struct file_operations fops_phy_err = {
241 .read = read_file_phy_err,
242 .open = simple_open,
243 .owner = THIS_MODULE,
244 .llseek = default_llseek,
245};
246
247void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
248 struct ath_rx_stats *rxstats)
249{
250 debugfs_create_file("phy_err", S_IRUSR, debugfs_phy, rxstats,
251 &fops_phy_err);
252}
253EXPORT_SYMBOL(ath9k_cmn_debug_phy_err);
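
The new common-debug.c moves the recv/phy_err debugfs files and the per-frame RX accounting into ath9k_common, presumably so the USB ath9k_htc driver can share them with ath9k (the Makefile hunk earlier adds common-debug.o to ath9k_common). A userspace model of the factored accounting helper; the flag values and struct layout are simplified stand-ins for ath_rx_stats:

    #include <stdio.h>

    #define RXERR_CRC 0x01
    #define RXERR_PHY 0x02

    struct rx_stats { unsigned int pkts_all, bytes_all, crc_err, phy_err; };

    /* One shared helper updates whichever driver's stats block is passed
     * in, as ath9k_cmn_debug_stat_rx() now does for both drivers. */
    static void cmn_stat_rx(struct rx_stats *st, unsigned int status,
                            unsigned int datalen)
    {
        st->pkts_all++;
        st->bytes_all += datalen;
        if (status & RXERR_CRC)
            st->crc_err++;
        if (status & RXERR_PHY)
            st->phy_err++;
    }

    int main(void)
    {
        struct rx_stats ath9k_stats = {0}, htc_stats = {0};

        cmn_stat_rx(&ath9k_stats, RXERR_CRC, 1500);  /* PCI driver path */
        cmn_stat_rx(&htc_stats, 0, 60);              /* USB (htc) driver path */
        printf("ath9k: %u pkts, %u crc; htc: %u pkts\n",
               ath9k_stats.pkts_all, ath9k_stats.crc_err, htc_stats.pkts_all);
        return 0;
    }
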
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
new file mode 100644
index 000000000000..7c9788490f7f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17
18
19/**
20 * struct ath_rx_stats - RX Statistics
21 * @rx_pkts_all: No. of total frames received, including ones that
22 may have had errors.
23 * @rx_bytes_all: No. of total bytes received, including ones that
24 may have had errors.
25 * @crc_err: No. of frames with incorrect CRC value
26 * @decrypt_crc_err: No. of frames whose CRC check failed after
27 decryption process completed
28 * @phy_err: No. of frames whose reception failed because the PHY
29 encountered an error
30 * @mic_err: No. of frames with incorrect TKIP MIC verification failure
31 * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
32 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
33 * @decrypt_busy_err: Decryption interruptions counter
34 * @phy_err_stats: Individual PHY error statistics
35 * @rx_len_err: No. of frames discarded due to bad length.
36 * @rx_oom_err: No. of frames dropped due to OOM issues.
37 * @rx_rate_err: No. of frames dropped due to rate errors.
38 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
39 * @rx_beacons: No. of beacons received.
40 * @rx_frags: No. of rx-fragements received.
41 * @rx_spectral: No of spectral packets received.
42 */
43struct ath_rx_stats {
44 u32 rx_pkts_all;
45 u32 rx_bytes_all;
46 u32 crc_err;
47 u32 decrypt_crc_err;
48 u32 phy_err;
49 u32 mic_err;
50 u32 pre_delim_crc_err;
51 u32 post_delim_crc_err;
52 u32 decrypt_busy_err;
53 u32 phy_err_stats[ATH9K_PHYERR_MAX];
54 u32 rx_len_err;
55 u32 rx_oom_err;
56 u32 rx_rate_err;
57 u32 rx_too_many_frags_err;
58 u32 rx_beacons;
59 u32 rx_frags;
60 u32 rx_spectral;
61};
62
63void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
64 struct ath_hw *ah);
65void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
66 struct ath_hw *ah);
67void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
68 struct ath_rx_status *rs);
69void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
70 struct ath_rx_stats *rxstats);
71void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
72 struct ath_rx_stats *rxstats);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ca38116838f0..ffc454b18637 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,6 +23,7 @@
23 23
24#include "common-init.h" 24#include "common-init.h"
25#include "common-beacon.h" 25#include "common-beacon.h"
26#include "common-debug.h"
26 27
27/* Common header for Atheros 802.11n base driver cores */ 28/* Common header for Atheros 802.11n base driver cores */
28 29
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 780ff1bee6f6..6cc42be48d4e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -948,151 +948,11 @@ static const struct file_operations fops_reset = {
948 .llseek = default_llseek, 948 .llseek = default_llseek,
949}; 949};
950 950
951static ssize_t read_file_recv(struct file *file, char __user *user_buf,
952 size_t count, loff_t *ppos)
953{
954#define RXS_ERR(s, e) \
955 do { \
956 len += scnprintf(buf + len, size - len, \
957 "%18s : %10u\n", s, \
958 sc->debug.stats.rxstats.e);\
959 } while (0)
960
961 struct ath_softc *sc = file->private_data;
962 char *buf;
963 unsigned int len = 0, size = 1600;
964 ssize_t retval = 0;
965
966 buf = kzalloc(size, GFP_KERNEL);
967 if (buf == NULL)
968 return -ENOMEM;
969
970 RXS_ERR("PKTS-ALL", rx_pkts_all);
971 RXS_ERR("BYTES-ALL", rx_bytes_all);
972 RXS_ERR("BEACONS", rx_beacons);
973 RXS_ERR("FRAGS", rx_frags);
974 RXS_ERR("SPECTRAL", rx_spectral);
975
976 RXS_ERR("CRC ERR", crc_err);
977 RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
978 RXS_ERR("PHY ERR", phy_err);
979 RXS_ERR("MIC ERR", mic_err);
980 RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
981 RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
982 RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
983 RXS_ERR("LENGTH-ERR", rx_len_err);
984 RXS_ERR("OOM-ERR", rx_oom_err);
985 RXS_ERR("RATE-ERR", rx_rate_err);
986 RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
987
988 if (len > size)
989 len = size;
990
991 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
992 kfree(buf);
993
994 return retval;
995
996#undef RXS_ERR
997}
998
999void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) 951void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
1000{ 952{
1001#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ 953 ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
1002
1003 RX_STAT_INC(rx_pkts_all);
1004 sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
1005
1006 if (rs->rs_status & ATH9K_RXERR_CRC)
1007 RX_STAT_INC(crc_err);
1008 if (rs->rs_status & ATH9K_RXERR_DECRYPT)
1009 RX_STAT_INC(decrypt_crc_err);
1010 if (rs->rs_status & ATH9K_RXERR_MIC)
1011 RX_STAT_INC(mic_err);
1012 if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
1013 RX_STAT_INC(pre_delim_crc_err);
1014 if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
1015 RX_STAT_INC(post_delim_crc_err);
1016 if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
1017 RX_STAT_INC(decrypt_busy_err);
1018
1019 if (rs->rs_status & ATH9K_RXERR_PHY) {
1020 RX_STAT_INC(phy_err);
1021 if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
1022 RX_PHY_ERR_INC(rs->rs_phyerr);
1023 }
1024
1025#undef RX_PHY_ERR_INC
1026} 954}
1027 955
1028static const struct file_operations fops_recv = {
1029 .read = read_file_recv,
1030 .open = simple_open,
1031 .owner = THIS_MODULE,
1032 .llseek = default_llseek,
1033};
1034
1035static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
1036 size_t count, loff_t *ppos)
1037{
1038#define PHY_ERR(s, p) \
1039 len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
1040 sc->debug.stats.rxstats.phy_err_stats[p]);
1041
1042 struct ath_softc *sc = file->private_data;
1043 char *buf;
1044 unsigned int len = 0, size = 1600;
1045 ssize_t retval = 0;
1046
1047 buf = kzalloc(size, GFP_KERNEL);
1048 if (buf == NULL)
1049 return -ENOMEM;
1050
1051 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
1052 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
1053 PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
1054 PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
1055 PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
1056 PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
1057 PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
1058 PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
1059 PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
1060 PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
1061 PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
1062 PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
1063 PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
1064 PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
1065 PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
1066 PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
1067 PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
1068 PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
1069 PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
1070 PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
1071 PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
1072 PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
1073 PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
1074 PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
1075 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
1076 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
1077
1078 if (len > size)
1079 len = size;
1080
1081 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1082 kfree(buf);
1083
1084 return retval;
1085
1086#undef PHY_ERR
1087}
1088
1089static const struct file_operations fops_phy_err = {
1090 .read = read_file_phy_err,
1091 .open = simple_open,
1092 .owner = THIS_MODULE,
1093 .llseek = default_llseek,
1094};
1095
1096static ssize_t read_file_regidx(struct file *file, char __user *user_buf, 956static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
1097 size_t count, loff_t *ppos) 957 size_t count, loff_t *ppos)
1098{ 958{
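The deleted RXS_ERR() macro wraps two statements in do { ... } while (0). That idiom makes a multi-statement macro expand to exactly one statement, so it can sit in an unbraced if/else without breaking the pairing; a naive "(a)++; (b)++" expansion would detach the else. A minimal standalone illustration (not kernel code):

#include <stdio.h>

/* Safe: expands to exactly one statement, so if/else pairing survives. */
#define COUNT_TWICE(a, b)	do { (a)++; (b)++; } while (0)

int main(void)
{
	int hits = 0, total = 0, x = 5;

	if (x > 0)
		COUNT_TWICE(hits, total);	/* fine without braces */
	else
		printf("non-positive\n");

	printf("hits=%d total=%d\n", hits, total);
	return 0;
}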
@@ -1268,62 +1128,6 @@ static const struct file_operations fops_dump_nfcal = {
1268 .llseek = default_llseek, 1128 .llseek = default_llseek,
1269}; 1129};
1270 1130
1271static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
1272 size_t count, loff_t *ppos)
1273{
1274 struct ath_softc *sc = file->private_data;
1275 struct ath_hw *ah = sc->sc_ah;
1276 u32 len = 0, size = 1500;
1277 ssize_t retval = 0;
1278 char *buf;
1279
1280 buf = kzalloc(size, GFP_KERNEL);
1281 if (!buf)
1282 return -ENOMEM;
1283
1284 len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
1285
1286 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1287 kfree(buf);
1288
1289 return retval;
1290}
1291
1292static const struct file_operations fops_base_eeprom = {
1293 .read = read_file_base_eeprom,
1294 .open = simple_open,
1295 .owner = THIS_MODULE,
1296 .llseek = default_llseek,
1297};
1298
1299static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
1300 size_t count, loff_t *ppos)
1301{
1302 struct ath_softc *sc = file->private_data;
1303 struct ath_hw *ah = sc->sc_ah;
1304 u32 len = 0, size = 6000;
1305 char *buf;
1306 size_t retval;
1307
1308 buf = kzalloc(size, GFP_KERNEL);
1309 if (buf == NULL)
1310 return -ENOMEM;
1311
1312 len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
1313
1314 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1315 kfree(buf);
1316
1317 return retval;
1318}
1319
1320static const struct file_operations fops_modal_eeprom = {
1321 .read = read_file_modal_eeprom,
1322 .open = simple_open,
1323 .owner = THIS_MODULE,
1324 .llseek = default_llseek,
1325};
1326
1327#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1131#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1328static ssize_t read_file_btcoex(struct file *file, char __user *user_buf, 1132static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
1329 size_t count, loff_t *ppos) 1133 size_t count, loff_t *ppos)
@@ -1524,10 +1328,10 @@ int ath9k_init_debug(struct ath_hw *ah)
1524 &fops_misc); 1328 &fops_misc);
1525 debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc, 1329 debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
1526 &fops_reset); 1330 &fops_reset);
1527 debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc, 1331
1528 &fops_recv); 1332 ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
1529 debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc, 1333 ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
1530 &fops_phy_err); 1334
1531 debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy, 1335 debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
1532 &ah->rxchainmask); 1336 &ah->rxchainmask);
1533 debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy, 1337 debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1547,10 +1351,10 @@ int ath9k_init_debug(struct ath_hw *ah)
1547 &fops_regdump); 1351 &fops_regdump);
1548 debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc, 1352 debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
1549 &fops_dump_nfcal); 1353 &fops_dump_nfcal);
1550 debugfs_create_file("base_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, 1354
1551 &fops_base_eeprom); 1355 ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
1552 debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, 1356 ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
1553 &fops_modal_eeprom); 1357
1554 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, 1358 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
1555 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); 1359 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
1556 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, 1360 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
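Every read_file_*() handler deleted above ends with simple_read_from_buffer(), which copies from an in-kernel buffer while honoring the caller's count and file offset. A userspace sketch of those semantics, with memcpy() standing in for copy_to_user(); this is a model of the behavior, not the kernel implementation:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Copy at most "count" bytes from from[*ppos..available), advance
 * *ppos, and return the number of bytes copied (0 at end of buffer). */
static ssize_t read_from_buffer(char *to, size_t count, off_t *ppos,
				const char *from, size_t available)
{
	off_t pos = *ppos;
	size_t n;

	if (pos < 0)
		return -1;		/* -EINVAL in the kernel */
	if ((size_t)pos >= available)
		return 0;		/* EOF */
	n = available - (size_t)pos;
	if (n > count)
		n = count;
	memcpy(to, from + pos, n);	/* copy_to_user() in the kernel */
	*ppos = pos + (off_t)n;
	return (ssize_t)n;
}

int main(void)
{
	const char src[] = "PKTS-ALL : 42\nCRC ERR : 3\n";
	char out[8];
	off_t pos = 0;
	ssize_t n;

	/* Reading in 8-byte chunks, as a pager would. */
	while ((n = read_from_buffer(out, sizeof(out), &pos, src,
				     strlen(src))) > 0)
		fwrite(out, 1, (size_t)n, stdout);
	return 0;
}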
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 559a68c2709c..53ae15bd0c9d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -221,50 +221,6 @@ struct ath_rx_rate_stats {
221 } cck_stats[4]; 221 } cck_stats[4];
222}; 222};
223 223
224/**
225 * struct ath_rx_stats - RX Statistics
226 * @rx_pkts_all: No. of total frames received, including ones that
227 may have had errors.
228 * @rx_bytes_all: No. of total bytes received, including ones that
229 may have had errors.
230 * @crc_err: No. of frames with incorrect CRC value
231 * @decrypt_crc_err: No. of frames whose CRC check failed after
232 decryption process completed
233 * @phy_err: No. of frames whose reception failed because the PHY
234 encountered an error
235 * @mic_err: No. of frames with incorrect TKIP MIC verification failure
236 * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
237 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
238 * @decrypt_busy_err: Decryption interruptions counter
239 * @phy_err_stats: Individual PHY error statistics
240 * @rx_len_err: No. of frames discarded due to bad length.
241 * @rx_oom_err: No. of frames dropped due to OOM issues.
242 * @rx_rate_err: No. of frames dropped due to rate errors.
243 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
244 * @rx_beacons: No. of beacons received.
245 * @rx_frags: No. of rx-fragements received.
246 * @rx_spectral: No of spectral packets received.
247 */
248struct ath_rx_stats {
249 u32 rx_pkts_all;
250 u32 rx_bytes_all;
251 u32 crc_err;
252 u32 decrypt_crc_err;
253 u32 phy_err;
254 u32 mic_err;
255 u32 pre_delim_crc_err;
256 u32 post_delim_crc_err;
257 u32 decrypt_busy_err;
258 u32 phy_err_stats[ATH9K_PHYERR_MAX];
259 u32 rx_len_err;
260 u32 rx_oom_err;
261 u32 rx_rate_err;
262 u32 rx_too_many_frags_err;
263 u32 rx_beacons;
264 u32 rx_frags;
265 u32 rx_spectral;
266};
267
268#define ANT_MAIN 0 224#define ANT_MAIN 0
269#define ANT_ALT 1 225#define ANT_ALT 1
270 226
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 857bb28b3894..726271c7c330 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -178,12 +178,12 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
178 pe.ts = mactime; 178 pe.ts = mactime;
179 if (ath9k_postprocess_radar_event(sc, &ard, &pe)) { 179 if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
180 struct dfs_pattern_detector *pd = sc->dfs_detector; 180 struct dfs_pattern_detector *pd = sc->dfs_detector;
181 static u64 last_ts;
182 ath_dbg(common, DFS, 181 ath_dbg(common, DFS,
183 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " 182 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
184 "width=%d, rssi=%d, delta_ts=%llu\n", 183 "width=%d, rssi=%d, delta_ts=%llu\n",
185 pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts); 184 pe.freq, pe.ts, pe.width, pe.rssi,
186 last_ts = pe.ts; 185 pe.ts - sc->dfs_prev_pulse_ts);
186 sc->dfs_prev_pulse_ts = pe.ts;
187 DFS_STAT_INC(sc, pulses_processed); 187 DFS_STAT_INC(sc, pulses_processed);
188 if (pd != NULL && pd->add_pulse(pd, &pe)) { 188 if (pd != NULL && pd->add_pulse(pd, &pe)) {
189 DFS_STAT_INC(sc, radar_detected); 189 DFS_STAT_INC(sc, radar_detected);
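The dfs.c hunk replaces a function-local static u64 last_ts with a per-device field, sc->dfs_prev_pulse_ts. A function-local static is shared by every instance of the driver, so with two radios the pulse deltas interleave timestamps from both. A standalone demonstration of the failure mode and the per-device fix:

#include <inttypes.h>
#include <stdio.h>

struct device { uint64_t prev_ts; };

/* Buggy shape: one shared "last" for every device instance. */
static uint64_t delta_shared(uint64_t ts)
{
	static uint64_t last;
	uint64_t d = ts - last;

	last = ts;
	return d;
}

/* Fixed shape: the state lives in the device, as in the hunk above. */
static uint64_t delta_per_dev(struct device *dev, uint64_t ts)
{
	uint64_t d = ts - dev->prev_ts;

	dev->prev_ts = ts;
	return d;
}

int main(void)
{
	struct device a = { 0 }, b = { 0 };
	uint64_t s1, s2, s3, s4;

	/* Radio A pulses at t=100 and t=200, radio B at t=1000 and
	 * t=1100, interleaved as the interrupts would arrive. */
	s1 = delta_shared(100);
	s2 = delta_shared(1000);
	s3 = delta_shared(200);		/* 200 - 1000: bogus huge delta */
	s4 = delta_shared(1100);
	printf("shared : %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
	       s1, s2, s3, s4);

	s1 = delta_per_dev(&a, 100);
	s2 = delta_per_dev(&b, 1000);
	s3 = delta_per_dev(&a, 200);	/* correct 100-tick delta */
	s4 = delta_per_dev(&b, 1100);
	printf("per-dev: %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
	       s1, s2, s3, s4);
	return 0;
}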
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index dab1f0cab993..09a5d72f3ff5 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -325,14 +325,14 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
325 325
326#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) 326#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
327#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) 327#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
328#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++) 328#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
329#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a) 329#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
330#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++ 330#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
331 331
332#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) 332#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
333 333
334void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, 334void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
335 struct ath_htc_rx_status *rxs); 335 struct ath_rx_status *rs);
336 336
337struct ath_tx_stats { 337struct ath_tx_stats {
338 u32 buf_queued; 338 u32 buf_queued;
@@ -345,25 +345,18 @@ struct ath_tx_stats {
345 u32 queue_stats[IEEE80211_NUM_ACS]; 345 u32 queue_stats[IEEE80211_NUM_ACS];
346}; 346};
347 347
348struct ath_rx_stats { 348struct ath_skbrx_stats {
349 u32 skb_allocated; 349 u32 skb_allocated;
350 u32 skb_completed; 350 u32 skb_completed;
351 u32 skb_completed_bytes; 351 u32 skb_completed_bytes;
352 u32 skb_dropped; 352 u32 skb_dropped;
353 u32 err_crc;
354 u32 err_decrypt_crc;
355 u32 err_mic;
356 u32 err_pre_delim;
357 u32 err_post_delim;
358 u32 err_decrypt_busy;
359 u32 err_phy;
360 u32 err_phy_stats[ATH9K_PHYERR_MAX];
361}; 353};
362 354
363struct ath9k_debug { 355struct ath9k_debug {
364 struct dentry *debugfs_phy; 356 struct dentry *debugfs_phy;
365 struct ath_tx_stats tx_stats; 357 struct ath_tx_stats tx_stats;
366 struct ath_rx_stats rx_stats; 358 struct ath_rx_stats rx_stats;
359 struct ath_skbrx_stats skbrx_stats;
367}; 360};
368 361
369void ath9k_htc_get_et_strings(struct ieee80211_hw *hw, 362void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
@@ -385,7 +378,7 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
385#define TX_QSTAT_INC(c) do { } while (0) 378#define TX_QSTAT_INC(c) do { } while (0)
386 379
387static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, 380static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
388 struct ath_htc_rx_status *rxs) 381 struct ath_rx_status *rs)
389{ 382{
390} 383}
391 384
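The RX_STAT_INC()/RX_STAT_ADD() macros above take a struct member name, not a value: the preprocessor splices the argument into the field access. That is why the skbrx_stats rename only needs to touch the macro bodies, not every call site. A standalone sketch of the same trick:

#include <stdio.h>

struct skbrx_stats {
	unsigned int skb_allocated;
	unsigned int skb_dropped;
};

static struct skbrx_stats stats;

/* The argument is a member name: the preprocessor pastes it into the
 * field access, exactly as RX_STAT_INC() does above. */
#define RX_STAT_INC(c)	(stats.c++)

int main(void)
{
	RX_STAT_INC(skb_allocated);
	RX_STAT_INC(skb_allocated);
	RX_STAT_INC(skb_dropped);
	printf("allocated=%u dropped=%u\n",
	       stats.skb_allocated, stats.skb_dropped);
	return 0;
}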
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index fb071ee4fcfb..8b529e4b8ac4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -243,39 +243,14 @@ static const struct file_operations fops_xmit = {
243}; 243};
244 244
245void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, 245void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
246 struct ath_htc_rx_status *rxs) 246 struct ath_rx_status *rs)
247{ 247{
248#define RX_PHY_ERR_INC(c) priv->debug.rx_stats.err_phy_stats[c]++ 248 ath9k_cmn_debug_stat_rx(&priv->debug.rx_stats, rs);
249
250 if (rxs->rs_status & ATH9K_RXERR_CRC)
251 priv->debug.rx_stats.err_crc++;
252 if (rxs->rs_status & ATH9K_RXERR_DECRYPT)
253 priv->debug.rx_stats.err_decrypt_crc++;
254 if (rxs->rs_status & ATH9K_RXERR_MIC)
255 priv->debug.rx_stats.err_mic++;
256 if (rxs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
257 priv->debug.rx_stats.err_pre_delim++;
258 if (rxs->rs_status & ATH9K_RX_DELIM_CRC_POST)
259 priv->debug.rx_stats.err_post_delim++;
260 if (rxs->rs_status & ATH9K_RX_DECRYPT_BUSY)
261 priv->debug.rx_stats.err_decrypt_busy++;
262
263 if (rxs->rs_status & ATH9K_RXERR_PHY) {
264 priv->debug.rx_stats.err_phy++;
265 if (rxs->rs_phyerr < ATH9K_PHYERR_MAX)
266 RX_PHY_ERR_INC(rxs->rs_phyerr);
267 }
268
269#undef RX_PHY_ERR_INC
270} 249}
271 250
272static ssize_t read_file_recv(struct file *file, char __user *user_buf, 251static ssize_t read_file_skb_rx(struct file *file, char __user *user_buf,
273 size_t count, loff_t *ppos) 252 size_t count, loff_t *ppos)
274{ 253{
275#define PHY_ERR(s, p) \
276 len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
277 priv->debug.rx_stats.err_phy_stats[p]);
278
279 struct ath9k_htc_priv *priv = file->private_data; 254 struct ath9k_htc_priv *priv = file->private_data;
280 char *buf; 255 char *buf;
281 unsigned int len = 0, size = 1500; 256 unsigned int len = 0, size = 1500;
@@ -287,63 +262,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
287 262
288 len += scnprintf(buf + len, size - len, 263 len += scnprintf(buf + len, size - len,
289 "%20s : %10u\n", "SKBs allocated", 264 "%20s : %10u\n", "SKBs allocated",
290 priv->debug.rx_stats.skb_allocated); 265 priv->debug.skbrx_stats.skb_allocated);
291 len += scnprintf(buf + len, size - len, 266 len += scnprintf(buf + len, size - len,
292 "%20s : %10u\n", "SKBs completed", 267 "%20s : %10u\n", "SKBs completed",
293 priv->debug.rx_stats.skb_completed); 268 priv->debug.skbrx_stats.skb_completed);
294 len += scnprintf(buf + len, size - len, 269 len += scnprintf(buf + len, size - len,
295 "%20s : %10u\n", "SKBs Dropped", 270 "%20s : %10u\n", "SKBs Dropped",
296 priv->debug.rx_stats.skb_dropped); 271 priv->debug.skbrx_stats.skb_dropped);
297
298 len += scnprintf(buf + len, size - len,
299 "%20s : %10u\n", "CRC ERR",
300 priv->debug.rx_stats.err_crc);
301 len += scnprintf(buf + len, size - len,
302 "%20s : %10u\n", "DECRYPT CRC ERR",
303 priv->debug.rx_stats.err_decrypt_crc);
304 len += scnprintf(buf + len, size - len,
305 "%20s : %10u\n", "MIC ERR",
306 priv->debug.rx_stats.err_mic);
307 len += scnprintf(buf + len, size - len,
308 "%20s : %10u\n", "PRE-DELIM CRC ERR",
309 priv->debug.rx_stats.err_pre_delim);
310 len += scnprintf(buf + len, size - len,
311 "%20s : %10u\n", "POST-DELIM CRC ERR",
312 priv->debug.rx_stats.err_post_delim);
313 len += scnprintf(buf + len, size - len,
314 "%20s : %10u\n", "DECRYPT BUSY ERR",
315 priv->debug.rx_stats.err_decrypt_busy);
316 len += scnprintf(buf + len, size - len,
317 "%20s : %10u\n", "TOTAL PHY ERR",
318 priv->debug.rx_stats.err_phy);
319
320
321 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
322 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
323 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
324 PHY_ERR("RATE", ATH9K_PHYERR_RATE);
325 PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
326 PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
327 PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
328 PHY_ERR("TOR", ATH9K_PHYERR_TOR);
329 PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
330 PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
331 PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
332 PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
333 PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
334 PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
335 PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
336 PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
337 PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
338 PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
339 PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
340 PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
341 PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
342 PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
343 PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
344 PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
345 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
346 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
347 272
348 if (len > size) 273 if (len > size)
349 len = size; 274 len = size;
@@ -352,12 +277,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
352 kfree(buf); 277 kfree(buf);
353 278
354 return retval; 279 return retval;
355
356#undef PHY_ERR
357} 280}
358 281
359static const struct file_operations fops_recv = { 282static const struct file_operations fops_skb_rx = {
360 .read = read_file_recv, 283 .read = read_file_skb_rx,
361 .open = simple_open, 284 .open = simple_open,
362 .owner = THIS_MODULE, 285 .owner = THIS_MODULE,
363 .llseek = default_llseek, 286 .llseek = default_llseek,
@@ -486,423 +409,6 @@ static const struct file_operations fops_debug = {
486 .llseek = default_llseek, 409 .llseek = default_llseek,
487}; 410};
488 411
489static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct ath9k_htc_priv *priv = file->private_data;
493 struct ath_common *common = ath9k_hw_common(priv->ah);
494 struct base_eep_header *pBase = NULL;
495 unsigned int len = 0, size = 1500;
496 ssize_t retval = 0;
497 char *buf;
498
499 pBase = ath9k_htc_get_eeprom_base(priv);
500
501 if (pBase == NULL) {
502 ath_err(common, "Unknown EEPROM type\n");
503 return 0;
504 }
505
506 buf = kzalloc(size, GFP_KERNEL);
507 if (buf == NULL)
508 return -ENOMEM;
509
510 len += scnprintf(buf + len, size - len,
511 "%20s : %10d\n", "Major Version",
512 pBase->version >> 12);
513 len += scnprintf(buf + len, size - len,
514 "%20s : %10d\n", "Minor Version",
515 pBase->version & 0xFFF);
516 len += scnprintf(buf + len, size - len,
517 "%20s : %10d\n", "Checksum",
518 pBase->checksum);
519 len += scnprintf(buf + len, size - len,
520 "%20s : %10d\n", "Length",
521 pBase->length);
522 len += scnprintf(buf + len, size - len,
523 "%20s : %10d\n", "RegDomain1",
524 pBase->regDmn[0]);
525 len += scnprintf(buf + len, size - len,
526 "%20s : %10d\n", "RegDomain2",
527 pBase->regDmn[1]);
528 len += scnprintf(buf + len, size - len,
529 "%20s : %10d\n",
530 "TX Mask", pBase->txMask);
531 len += scnprintf(buf + len, size - len,
532 "%20s : %10d\n",
533 "RX Mask", pBase->rxMask);
534 len += scnprintf(buf + len, size - len,
535 "%20s : %10d\n",
536 "Allow 5GHz",
537 !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
538 len += scnprintf(buf + len, size - len,
539 "%20s : %10d\n",
540 "Allow 2GHz",
541 !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
542 len += scnprintf(buf + len, size - len,
543 "%20s : %10d\n",
544 "Disable 2GHz HT20",
545 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
546 len += scnprintf(buf + len, size - len,
547 "%20s : %10d\n",
548 "Disable 2GHz HT40",
549 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
550 len += scnprintf(buf + len, size - len,
551 "%20s : %10d\n",
552 "Disable 5Ghz HT20",
553 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
554 len += scnprintf(buf + len, size - len,
555 "%20s : %10d\n",
556 "Disable 5Ghz HT40",
557 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
558 len += scnprintf(buf + len, size - len,
559 "%20s : %10d\n",
560 "Big Endian",
561 !!(pBase->eepMisc & 0x01));
562 len += scnprintf(buf + len, size - len,
563 "%20s : %10d\n",
564 "Cal Bin Major Ver",
565 (pBase->binBuildNumber >> 24) & 0xFF);
566 len += scnprintf(buf + len, size - len,
567 "%20s : %10d\n",
568 "Cal Bin Minor Ver",
569 (pBase->binBuildNumber >> 16) & 0xFF);
570 len += scnprintf(buf + len, size - len,
571 "%20s : %10d\n",
572 "Cal Bin Build",
573 (pBase->binBuildNumber >> 8) & 0xFF);
574
575 /*
576 * UB91 specific data.
577 */
578 if (AR_SREV_9271(priv->ah)) {
579 struct base_eep_header_4k *pBase4k =
580 &priv->ah->eeprom.map4k.baseEepHeader;
581
582 len += scnprintf(buf + len, size - len,
583 "%20s : %10d\n",
584 "TX Gain type",
585 pBase4k->txGainType);
586 }
587
588 /*
589 * UB95 specific data.
590 */
591 if (priv->ah->hw_version.usbdev == AR9287_USB) {
592 struct base_eep_ar9287_header *pBase9287 =
593 &priv->ah->eeprom.map9287.baseEepHeader;
594
595 len += scnprintf(buf + len, size - len,
596 "%20s : %10ddB\n",
597 "Power Table Offset",
598 pBase9287->pwrTableOffset);
599
600 len += scnprintf(buf + len, size - len,
601 "%20s : %10d\n",
602 "OpenLoop Power Ctrl",
603 pBase9287->openLoopPwrCntl);
604 }
605
606 len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
607 pBase->macAddr);
608 if (len > size)
609 len = size;
610
611 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
612 kfree(buf);
613
614 return retval;
615}
616
617static const struct file_operations fops_base_eeprom = {
618 .read = read_file_base_eeprom,
619 .open = simple_open,
620 .owner = THIS_MODULE,
621 .llseek = default_llseek,
622};
623
624static ssize_t read_4k_modal_eeprom(struct file *file,
625 char __user *user_buf,
626 size_t count, loff_t *ppos)
627{
628#define PR_EEP(_s, _val) \
629 do { \
630 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
631 _s, (_val)); \
632 } while (0)
633
634 struct ath9k_htc_priv *priv = file->private_data;
635 struct modal_eep_4k_header *pModal = &priv->ah->eeprom.map4k.modalHeader;
636 unsigned int len = 0, size = 2048;
637 ssize_t retval = 0;
638 char *buf;
639
640 buf = kzalloc(size, GFP_KERNEL);
641 if (buf == NULL)
642 return -ENOMEM;
643
644 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
645 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
646 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
647 PR_EEP("Switch Settle", pModal->switchSettling);
648 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
649 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
650 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
651 PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
652 PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
653 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
654 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
655 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
656 PR_EEP("CCA Threshold)", pModal->thresh62);
657 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
658 PR_EEP("xpdGain", pModal->xpdGain);
659 PR_EEP("External PD", pModal->xpd);
660 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
661 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
662 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
663 PR_EEP("O/D Bias Version", pModal->version);
664 PR_EEP("CCK OutputBias", pModal->ob_0);
665 PR_EEP("BPSK OutputBias", pModal->ob_1);
666 PR_EEP("QPSK OutputBias", pModal->ob_2);
667 PR_EEP("16QAM OutputBias", pModal->ob_3);
668 PR_EEP("64QAM OutputBias", pModal->ob_4);
669 PR_EEP("CCK Driver1_Bias", pModal->db1_0);
670 PR_EEP("BPSK Driver1_Bias", pModal->db1_1);
671 PR_EEP("QPSK Driver1_Bias", pModal->db1_2);
672 PR_EEP("16QAM Driver1_Bias", pModal->db1_3);
673 PR_EEP("64QAM Driver1_Bias", pModal->db1_4);
674 PR_EEP("CCK Driver2_Bias", pModal->db2_0);
675 PR_EEP("BPSK Driver2_Bias", pModal->db2_1);
676 PR_EEP("QPSK Driver2_Bias", pModal->db2_2);
677 PR_EEP("16QAM Driver2_Bias", pModal->db2_3);
678 PR_EEP("64QAM Driver2_Bias", pModal->db2_4);
679 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
680 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
681 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
682 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
683 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
684 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
685 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
686 PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
687 PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
688 PR_EEP("Ant. Diversity ctl1", pModal->antdiv_ctl1);
689 PR_EEP("Ant. Diversity ctl2", pModal->antdiv_ctl2);
690 PR_EEP("TX Diversity", pModal->tx_diversity);
691
692 if (len > size)
693 len = size;
694
695 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
696 kfree(buf);
697
698 return retval;
699
700#undef PR_EEP
701}
702
703static ssize_t read_def_modal_eeprom(struct file *file,
704 char __user *user_buf,
705 size_t count, loff_t *ppos)
706{
707#define PR_EEP(_s, _val) \
708 do { \
709 if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
710 pModal = &priv->ah->eeprom.def.modalHeader[1]; \
711 len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
712 _s, (_val), "|"); \
713 } \
714 if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
715 pModal = &priv->ah->eeprom.def.modalHeader[0]; \
716 len += scnprintf(buf + len, size - len, "%9d\n",\
717 (_val)); \
718 } \
719 } while (0)
720
721 struct ath9k_htc_priv *priv = file->private_data;
722 struct base_eep_header *pBase = &priv->ah->eeprom.def.baseEepHeader;
723 struct modal_eep_header *pModal = NULL;
724 unsigned int len = 0, size = 3500;
725 ssize_t retval = 0;
726 char *buf;
727
728 buf = kzalloc(size, GFP_KERNEL);
729 if (buf == NULL)
730 return -ENOMEM;
731
732 len += scnprintf(buf + len, size - len,
733 "%31s %15s\n", "2G", "5G");
734 len += scnprintf(buf + len, size - len,
735 "%32s %16s\n", "====", "====\n");
736
737 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
738 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
739 PR_EEP("Chain2 Ant. Control", pModal->antCtrlChain[2]);
740 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
741 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
742 PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
743 PR_EEP("Chain2 Ant. Gain", pModal->antennaGainCh[2]);
744 PR_EEP("Switch Settle", pModal->switchSettling);
745 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
746 PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
747 PR_EEP("Chain2 TxRxAtten", pModal->txRxAttenCh[2]);
748 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
749 PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
750 PR_EEP("Chain2 RxTxMargin", pModal->rxTxMarginCh[2]);
751 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
752 PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
753 PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
754 PR_EEP("Chain1 xlna Gain", pModal->xlnaGainCh[1]);
755 PR_EEP("Chain2 xlna Gain", pModal->xlnaGainCh[2]);
756 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
757 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
758 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
759 PR_EEP("CCA Threshold)", pModal->thresh62);
760 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
761 PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
762 PR_EEP("Chain2 NF Threshold", pModal->noiseFloorThreshCh[2]);
763 PR_EEP("xpdGain", pModal->xpdGain);
764 PR_EEP("External PD", pModal->xpd);
765 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
766 PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
767 PR_EEP("Chain2 I Coefficient", pModal->iqCalICh[2]);
768 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
769 PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
770 PR_EEP("Chain2 Q Coefficient", pModal->iqCalQCh[2]);
771 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
772 PR_EEP("Chain0 OutputBias", pModal->ob);
773 PR_EEP("Chain0 DriverBias", pModal->db);
774 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
775 PR_EEP("2chain pwr decrease", pModal->pwrDecreaseFor2Chain);
776 PR_EEP("3chain pwr decrease", pModal->pwrDecreaseFor3Chain);
777 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
778 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
779 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
780 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
781 PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
782 PR_EEP("Chain2 bswAtten", pModal->bswAtten[2]);
783 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
784 PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
785 PR_EEP("Chain2 bswMargin", pModal->bswMargin[2]);
786 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
787 PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
788 PR_EEP("Chain1 xatten2Db", pModal->xatten2Db[1]);
789 PR_EEP("Chain2 xatten2Db", pModal->xatten2Db[2]);
790 PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
791 PR_EEP("Chain1 xatten2Margin", pModal->xatten2Margin[1]);
792 PR_EEP("Chain2 xatten2Margin", pModal->xatten2Margin[2]);
793 PR_EEP("Chain1 OutputBias", pModal->ob_ch1);
794 PR_EEP("Chain1 DriverBias", pModal->db_ch1);
795 PR_EEP("LNA Control", pModal->lna_ctl);
796 PR_EEP("XPA Bias Freq0", pModal->xpaBiasLvlFreq[0]);
797 PR_EEP("XPA Bias Freq1", pModal->xpaBiasLvlFreq[1]);
798 PR_EEP("XPA Bias Freq2", pModal->xpaBiasLvlFreq[2]);
799
800 if (len > size)
801 len = size;
802
803 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
804 kfree(buf);
805
806 return retval;
807
808#undef PR_EEP
809}
810
811static ssize_t read_9287_modal_eeprom(struct file *file,
812 char __user *user_buf,
813 size_t count, loff_t *ppos)
814{
815#define PR_EEP(_s, _val) \
816 do { \
817 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
818 _s, (_val)); \
819 } while (0)
820
821 struct ath9k_htc_priv *priv = file->private_data;
822 struct modal_eep_ar9287_header *pModal = &priv->ah->eeprom.map9287.modalHeader;
823 unsigned int len = 0, size = 3000;
824 ssize_t retval = 0;
825 char *buf;
826
827 buf = kzalloc(size, GFP_KERNEL);
828 if (buf == NULL)
829 return -ENOMEM;
830
831 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
832 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
833 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
834 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
835 PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
836 PR_EEP("Switch Settle", pModal->switchSettling);
837 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
838 PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
839 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
840 PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
841 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
842 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
843 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
844 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
845 PR_EEP("CCA Threshold)", pModal->thresh62);
846 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
847 PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
848 PR_EEP("xpdGain", pModal->xpdGain);
849 PR_EEP("External PD", pModal->xpd);
850 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
851 PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
852 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
853 PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
854 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
855 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
856 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
857 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
858 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
859 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
860 PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
861 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
862 PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
863 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
864 PR_EEP("AR92x7 Version", pModal->version);
865 PR_EEP("DriverBias1", pModal->db1);
866 PR_EEP("DriverBias2", pModal->db1);
867 PR_EEP("CCK OutputBias", pModal->ob_cck);
868 PR_EEP("PSK OutputBias", pModal->ob_psk);
869 PR_EEP("QAM OutputBias", pModal->ob_qam);
870 PR_EEP("PAL_OFF OutputBias", pModal->ob_pal_off);
871
872 if (len > size)
873 len = size;
874
875 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
876 kfree(buf);
877
878 return retval;
879
880#undef PR_EEP
881}
882
883static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
884 size_t count, loff_t *ppos)
885{
886 struct ath9k_htc_priv *priv = file->private_data;
887
888 if (AR_SREV_9271(priv->ah))
889 return read_4k_modal_eeprom(file, user_buf, count, ppos);
890 else if (priv->ah->hw_version.usbdev == AR9280_USB)
891 return read_def_modal_eeprom(file, user_buf, count, ppos);
892 else if (priv->ah->hw_version.usbdev == AR9287_USB)
893 return read_9287_modal_eeprom(file, user_buf, count, ppos);
894
895 return 0;
896}
897
898static const struct file_operations fops_modal_eeprom = {
899 .read = read_file_modal_eeprom,
900 .open = simple_open,
901 .owner = THIS_MODULE,
902 .llseek = default_llseek,
903};
904
905
906/* Ethtool support for get-stats */ 412/* Ethtool support for get-stats */
907#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO" 413#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
908static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = { 414static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -947,6 +453,8 @@ int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
947 453
948#define STXBASE priv->debug.tx_stats 454#define STXBASE priv->debug.tx_stats
949#define SRXBASE priv->debug.rx_stats 455#define SRXBASE priv->debug.rx_stats
456#define SKBTXBASE priv->debug.tx_stats
457#define SKBRXBASE priv->debug.skbrx_stats
950#define ASTXQ(a) \ 458#define ASTXQ(a) \
951 data[i++] = STXBASE.a[IEEE80211_AC_BE]; \ 459 data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
952 data[i++] = STXBASE.a[IEEE80211_AC_BK]; \ 460 data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
@@ -960,24 +468,24 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
960 struct ath9k_htc_priv *priv = hw->priv; 468 struct ath9k_htc_priv *priv = hw->priv;
961 int i = 0; 469 int i = 0;
962 470
963 data[i++] = STXBASE.skb_success; 471 data[i++] = SKBTXBASE.skb_success;
964 data[i++] = STXBASE.skb_success_bytes; 472 data[i++] = SKBTXBASE.skb_success_bytes;
965 data[i++] = SRXBASE.skb_completed; 473 data[i++] = SKBRXBASE.skb_completed;
966 data[i++] = SRXBASE.skb_completed_bytes; 474 data[i++] = SKBRXBASE.skb_completed_bytes;
967 475
968 ASTXQ(queue_stats); 476 ASTXQ(queue_stats);
969 477
970 data[i++] = SRXBASE.err_crc; 478 data[i++] = SRXBASE.crc_err;
971 data[i++] = SRXBASE.err_decrypt_crc; 479 data[i++] = SRXBASE.decrypt_crc_err;
972 data[i++] = SRXBASE.err_phy; 480 data[i++] = SRXBASE.phy_err;
973 data[i++] = SRXBASE.err_mic; 481 data[i++] = SRXBASE.mic_err;
974 data[i++] = SRXBASE.err_pre_delim; 482 data[i++] = SRXBASE.pre_delim_crc_err;
975 data[i++] = SRXBASE.err_post_delim; 483 data[i++] = SRXBASE.post_delim_crc_err;
976 data[i++] = SRXBASE.err_decrypt_busy; 484 data[i++] = SRXBASE.decrypt_busy_err;
977 485
978 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR]; 486 data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_RADAR];
979 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING]; 487 data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING];
980 data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING]; 488 data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_CCK_TIMING];
981 489
982 WARN_ON(i != ATH9K_HTC_SSTATS_LEN); 490 WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
983} 491}
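ath9k_htc_get_et_stats() fills a flat array whose slot order must stay in lock-step with the ethtool gstrings table; the trailing WARN_ON(i != ATH9K_HTC_SSTATS_LEN) catches drift between the two. A standalone model of the same guard, with assert() in place of WARN_ON():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static const char *labels[] = { "skb_completed", "crc_err", "phy_err" };
#define N_STATS (sizeof(labels) / sizeof(labels[0]))

struct stats { uint32_t skb_completed, crc_err, phy_err; };

static void fill(const struct stats *s, uint64_t *data)
{
	size_t i = 0;

	data[i++] = s->skb_completed;
	data[i++] = s->crc_err;
	data[i++] = s->phy_err;
	/* Same idea as the WARN_ON() above: the fill order must match
	 * the label table, slot for slot. */
	assert(i == N_STATS);
}

int main(void)
{
	struct stats s = { 7, 2, 1 };
	uint64_t data[N_STATS];
	size_t i;

	fill(&s, data);
	for (i = 0; i < N_STATS; i++)
		printf("%-14s %llu\n", labels[i],
		       (unsigned long long)data[i]);
	return 0;
}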
@@ -1001,18 +509,21 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
1001 priv, &fops_tgt_rx_stats); 509 priv, &fops_tgt_rx_stats);
1002 debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy, 510 debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy,
1003 priv, &fops_xmit); 511 priv, &fops_xmit);
1004 debugfs_create_file("recv", S_IRUSR, priv->debug.debugfs_phy, 512 debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy,
1005 priv, &fops_recv); 513 priv, &fops_skb_rx);
514
515 ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats);
516 ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats);
517
1006 debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy, 518 debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy,
1007 priv, &fops_slot); 519 priv, &fops_slot);
1008 debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy, 520 debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy,
1009 priv, &fops_queue); 521 priv, &fops_queue);
1010 debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy, 522 debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy,
1011 priv, &fops_debug); 523 priv, &fops_debug);
1012 debugfs_create_file("base_eeprom", S_IRUSR, priv->debug.debugfs_phy, 524
1013 priv, &fops_base_eeprom); 525 ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah);
1014 debugfs_create_file("modal_eeprom", S_IRUSR, priv->debug.debugfs_phy, 526 ath9k_cmn_debug_modal_eeprom(priv->debug.debugfs_phy, priv->ah);
1015 priv, &fops_modal_eeprom);
1016 527
1017 return 0; 528 return 0;
1018} 529}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 289f3d8924b5..bb86eb2ffc95 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -996,8 +996,6 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
996 goto rx_next; 996 goto rx_next;
997 } 997 }
998 998
999 ath9k_htc_err_stat_rx(priv, rxstatus);
1000
1001 /* Get the RX status information */ 999 /* Get the RX status information */
1002 1000
1003 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 1001 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
@@ -1005,6 +1003,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1005 /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER). 1003 /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
1006 * After this, we can drop this part of skb. */ 1004 * After this, we can drop this part of skb. */
1007 rx_status_htc_to_ath(&rx_stats, rxstatus); 1005 rx_status_htc_to_ath(&rx_stats, rxstatus);
1006 ath9k_htc_err_stat_rx(priv, &rx_stats);
1008 rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp); 1007 rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
1009 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE); 1008 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
1010 1009
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c8a9dfab1fee..2a8ed8375ec0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -26,7 +26,6 @@
26#include "ar9003_mac.h" 26#include "ar9003_mac.h"
27#include "ar9003_mci.h" 27#include "ar9003_mci.h"
28#include "ar9003_phy.h" 28#include "ar9003_phy.h"
29#include "debug.h"
30#include "ath9k.h" 29#include "ath9k.h"
31 30
32static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 31static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
@@ -246,6 +245,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
246 return; 245 return;
247 case AR9300_DEVID_AR953X: 246 case AR9300_DEVID_AR953X:
248 ah->hw_version.macVersion = AR_SREV_VERSION_9531; 247 ah->hw_version.macVersion = AR_SREV_VERSION_9531;
248 if (ah->get_mac_revision)
249 ah->hw_version.macRev = ah->get_mac_revision();
249 return; 250 return;
250 } 251 }
251 252
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 36ae6490e554..0246b990fe87 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -61,6 +61,10 @@ static int ath9k_ps_enable;
61module_param_named(ps_enable, ath9k_ps_enable, int, 0444); 61module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); 62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
63 63
64static int ath9k_use_chanctx;
65module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444);
66MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
67
64bool is_ath9k_unloaded; 68bool is_ath9k_unloaded;
65 69
66#ifdef CONFIG_MAC80211_LEDS 70#ifdef CONFIG_MAC80211_LEDS
@@ -508,7 +512,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
508 sc->tx99_power = MAX_RATE_POWER + 1; 512 sc->tx99_power = MAX_RATE_POWER + 1;
509 init_waitqueue_head(&sc->tx_wait); 513 init_waitqueue_head(&sc->tx_wait);
510 514
511 if (!pdata) { 515 if (!pdata || pdata->use_eeprom) {
512 ah->ah_flags |= AH_USE_EEPROM; 516 ah->ah_flags |= AH_USE_EEPROM;
513 sc->sc_ah->led_pin = -1; 517 sc->sc_ah->led_pin = -1;
514 } else { 518 } else {
@@ -589,6 +593,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
589 if (ret) 593 if (ret)
590 goto err_btcoex; 594 goto err_btcoex;
591 595
596 sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
597 NULL, sc, AR_FIRST_NDP_TIMER);
598
592 ath9k_cmn_init_crypto(sc->sc_ah); 599 ath9k_cmn_init_crypto(sc->sc_ah);
593 ath9k_init_misc(sc); 600 ath9k_init_misc(sc);
594 ath_fill_led_pin(sc); 601 ath_fill_led_pin(sc);
@@ -643,17 +650,20 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
643} 650}
644 651
645static const struct ieee80211_iface_limit if_limits[] = { 652static const struct ieee80211_iface_limit if_limits[] = {
646 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | 653 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
647 BIT(NL80211_IFTYPE_P2P_CLIENT) |
648 BIT(NL80211_IFTYPE_WDS) },
649 { .max = 8, .types = 654 { .max = 8, .types =
650#ifdef CONFIG_MAC80211_MESH 655#ifdef CONFIG_MAC80211_MESH
651 BIT(NL80211_IFTYPE_MESH_POINT) | 656 BIT(NL80211_IFTYPE_MESH_POINT) |
652#endif 657#endif
653 BIT(NL80211_IFTYPE_AP) | 658 BIT(NL80211_IFTYPE_AP) },
659 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
654 BIT(NL80211_IFTYPE_P2P_GO) }, 660 BIT(NL80211_IFTYPE_P2P_GO) },
655}; 661};
656 662
663static const struct ieee80211_iface_limit wds_limits[] = {
664 { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
665};
666
657static const struct ieee80211_iface_limit if_dfs_limits[] = { 667static const struct ieee80211_iface_limit if_dfs_limits[] = {
658 { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | 668 { .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
659#ifdef CONFIG_MAC80211_MESH 669#ifdef CONFIG_MAC80211_MESH
@@ -670,6 +680,13 @@ static const struct ieee80211_iface_combination if_comb[] = {
670 .num_different_channels = 1, 680 .num_different_channels = 1,
671 .beacon_int_infra_match = true, 681 .beacon_int_infra_match = true,
672 }, 682 },
683 {
684 .limits = wds_limits,
685 .n_limits = ARRAY_SIZE(wds_limits),
686 .max_interfaces = 2048,
687 .num_different_channels = 1,
688 .beacon_int_infra_match = true,
689 },
673#ifdef CONFIG_ATH9K_DFS_CERTIFIED 690#ifdef CONFIG_ATH9K_DFS_CERTIFIED
674 { 691 {
675 .limits = if_dfs_limits, 692 .limits = if_dfs_limits,
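The new wds_limits table and the extra if_comb entry are sized with ARRAY_SIZE(), so adding or removing an entry never requires editing a count by hand; the use_chanctx branch further down then deliberately advertises only the first combination. ARRAY_SIZE() reduces to the classic sizeof ratio; the sketch below uses a placeholder type bit, not the real NL80211 enum value:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct iface_limit { int max; unsigned int types; };

static const struct iface_limit wds_limits[] = {
	{ .max = 2048, .types = 1u << 3 },	/* placeholder type bit */
};

int main(void)
{
	/* n_limits tracks the table automatically, as in the hunk above. */
	printf("n_limits = %zu\n", ARRAY_SIZE(wds_limits));
	return 0;
}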
@@ -711,19 +728,23 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
711 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) 728 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
712 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 729 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
713 730
714 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 731 hw->wiphy->features |= (NL80211_FEATURE_ACTIVE_MONITOR |
732 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE);
715 733
716 if (!config_enabled(CONFIG_ATH9K_TX99)) { 734 if (!config_enabled(CONFIG_ATH9K_TX99)) {
717 hw->wiphy->interface_modes = 735 hw->wiphy->interface_modes =
718 BIT(NL80211_IFTYPE_P2P_GO) | 736 BIT(NL80211_IFTYPE_P2P_GO) |
719 BIT(NL80211_IFTYPE_P2P_CLIENT) | 737 BIT(NL80211_IFTYPE_P2P_CLIENT) |
720 BIT(NL80211_IFTYPE_AP) | 738 BIT(NL80211_IFTYPE_AP) |
721 BIT(NL80211_IFTYPE_WDS) |
722 BIT(NL80211_IFTYPE_STATION) | 739 BIT(NL80211_IFTYPE_STATION) |
723 BIT(NL80211_IFTYPE_ADHOC) | 740 BIT(NL80211_IFTYPE_ADHOC) |
724 BIT(NL80211_IFTYPE_MESH_POINT); 741 BIT(NL80211_IFTYPE_MESH_POINT);
725 hw->wiphy->iface_combinations = if_comb; 742 hw->wiphy->iface_combinations = if_comb;
726 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 743 if (!ath9k_use_chanctx) {
744 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
745 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS);
746 } else
747 hw->wiphy->n_iface_combinations = 1;
727 } 748 }
728 749
729 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 750 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -855,6 +876,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
855{ 876{
856 int i = 0; 877 int i = 0;
857 878
879 if (sc->p2p_ps_timer)
880 ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
881
858 ath9k_deinit_btcoex(sc); 882 ath9k_deinit_btcoex(sc);
859 883
860 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 884 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 51ce36f108f9..275205ab5f15 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -958,3 +958,25 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
958 return; 958 return;
959} 959}
960EXPORT_SYMBOL(ath9k_hw_set_interrupts); 960EXPORT_SYMBOL(ath9k_hw_set_interrupts);
961
962#define ATH9K_HW_MAX_DCU 10
963#define ATH9K_HW_SLICE_PER_DCU 16
964#define ATH9K_HW_BIT_IN_SLICE 16
965void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
966{
967 int dcu_idx;
968 u32 filter;
969
970 for (dcu_idx = 0; dcu_idx < 10; dcu_idx++) {
971 filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
972 filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
973 filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
974 AR_D_TXBLK_WRITE_SLICE);
975 filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
976 ath_dbg(ath9k_hw_common(ah), PS,
977 "DCU%d staid %d set %d txfilter %08x\n",
978 dcu_idx, destidx, set, filter);
979 REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
980 }
981}
982EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
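ath9k_hw_set_tx_filter() sets one bit per station across 16-bit slices of a per-DCU filter register: destidx selects slice destidx / 16 and bit destidx % 16, and the same word is written once per DCU (note the loop bound is the literal 10 even though ATH9K_HW_MAX_DCU is defined just above it). A worked standalone example; the field shifts here are assumptions for illustration only, since SM() and the AR_D_TXBLK_* masks are hardware-specific:

#include <stdio.h>
#include <stdint.h>

#define SLICE_PER_DCU	16
#define BIT_IN_SLICE	16

/* Modeled layout (assumed, for illustration): command in bit 30, DCU
 * index in bits 25..29, slice index in bits 16..24, station bit in
 * bits 0..15.  The real AR_D_TXBLK_* field positions differ. */
static uint32_t txblk_word(int dcu, int destidx, int set)
{
	uint32_t w = 0;

	w |= (uint32_t)(set ? 1 : 0) << 30;
	w |= (uint32_t)dcu << 25;
	w |= (uint32_t)(destidx / SLICE_PER_DCU) << 16;
	w |= 1u << (destidx % BIT_IN_SLICE);
	return w;
}

int main(void)
{
	int destidx = 37;	/* key index 37 -> slice 2, bit 5 */
	int dcu;

	for (dcu = 0; dcu < 10; dcu++)
		printf("DCU%d filter %08x\n",
		       dcu, txblk_word(dcu, destidx, 1));
	return 0;
}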
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 89df634e81f9..da7686757535 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -729,6 +729,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
729void ath9k_hw_abortpcurecv(struct ath_hw *ah); 729void ath9k_hw_abortpcurecv(struct ath_hw *ah);
730bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset); 730bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
731int ath9k_hw_beaconq_setup(struct ath_hw *ah); 731int ath9k_hw_beaconq_setup(struct ath_hw *ah);
732void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set);
732 733
733/* Interrupt Handling */ 734/* Interrupt Handling */
734bool ath9k_hw_intrpend(struct ath_hw *ah); 735bool ath9k_hw_intrpend(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index d69853b848ce..62ac95d6bb9d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
261 sc->gtt_cnt = 0; 261 sc->gtt_cnt = 0;
262 ieee80211_wake_queues(sc->hw); 262 ieee80211_wake_queues(sc->hw);
263 263
264 ath9k_p2p_ps_timer(sc);
265
264 return true; 266 return true;
265} 267}
266 268
@@ -419,6 +421,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
419 an->sc = sc; 421 an->sc = sc;
420 an->sta = sta; 422 an->sta = sta;
421 an->vif = vif; 423 an->vif = vif;
424 memset(&an->key_idx, 0, sizeof(an->key_idx));
422 425
423 ath_tx_node_init(sc, an); 426 ath_tx_node_init(sc, an);
424} 427}
@@ -1119,6 +1122,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1119 if (ath9k_uses_beacons(vif->type)) 1122 if (ath9k_uses_beacons(vif->type))
1120 ath9k_beacon_assign_slot(sc, vif); 1123 ath9k_beacon_assign_slot(sc, vif);
1121 1124
1125 avp->vif = vif;
1126
1122 an->sc = sc; 1127 an->sc = sc;
1123 an->sta = NULL; 1128 an->sta = NULL;
1124 an->vif = vif; 1129 an->vif = vif;
@@ -1163,6 +1168,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1163 return 0; 1168 return 0;
1164} 1169}
1165 1170
1171static void
1172ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
1173{
1174 struct ath_hw *ah = sc->sc_ah;
1175 s32 tsf, target_tsf;
1176
1177 if (!avp || !avp->noa.has_next_tsf)
1178 return;
1179
1180 ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
1181
1182 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1183
1184 target_tsf = avp->noa.next_tsf;
1185 if (!avp->noa.absent)
1186 target_tsf -= ATH_P2P_PS_STOP_TIME;
1187
1188 if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
1189 target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
1190
1191 ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
1192}
1193
1166static void ath9k_remove_interface(struct ieee80211_hw *hw, 1194static void ath9k_remove_interface(struct ieee80211_hw *hw,
1167 struct ieee80211_vif *vif) 1195 struct ieee80211_vif *vif)
1168{ 1196{
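The TSF here is a free-running 32-bit counter, so ath9k_update_p2p_ps_timer() never compares absolute timestamps: it subtracts first and compares the signed difference (target_tsf - tsf < ATH_P2P_PS_STOP_TIME), which stays correct across the 2^32 wrap as long as the real gap is under 2^31 ticks. The same test shape, standalone:

#include <stdio.h>
#include <stdint.h>

/* True if "a" is less than "window" ticks ahead of "b", computed the
 * wraparound-safe way: subtract first, then compare the signed
 * difference (the same shape as the target_tsf - tsf test above). */
static int within_window(uint32_t a, uint32_t b, int32_t window)
{
	return (int32_t)(a - b) < window;
}

int main(void)
{
	uint32_t tsf = 0xfffffff0u;	/* just before the wrap */
	uint32_t target = 0x00000010u;	/* just after it: 32 ticks ahead */

	printf("diff = %d ticks, within 1000: %s\n",
	       (int32_t)(target - tsf),
	       within_window(target, tsf, 1000) ? "yes" : "no");
	return 0;
}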
@@ -1174,6 +1202,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1174 1202
1175 mutex_lock(&sc->mutex); 1203 mutex_lock(&sc->mutex);
1176 1204
1205 spin_lock_bh(&sc->sc_pcu_lock);
1206 if (avp == sc->p2p_ps_vif) {
1207 sc->p2p_ps_vif = NULL;
1208 ath9k_update_p2p_ps_timer(sc, NULL);
1209 }
1210 spin_unlock_bh(&sc->sc_pcu_lock);
1211
1177 sc->nvifs--; 1212 sc->nvifs--;
1178 sc->tx99_vif = NULL; 1213 sc->tx99_vif = NULL;
1179 1214
@@ -1427,8 +1462,10 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1427 return 0; 1462 return 0;
1428 1463
1429 key = ath_key_config(common, vif, sta, &ps_key); 1464 key = ath_key_config(common, vif, sta, &ps_key);
1430 if (key > 0) 1465 if (key > 0) {
1431 an->ps_key = key; 1466 an->ps_key = key;
1467 an->key_idx[0] = key;
1468 }
1432 1469
1433 return 0; 1470 return 0;
1434} 1471}
@@ -1446,6 +1483,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
1446 1483
1447 ath_key_delete(common, &ps_key); 1484 ath_key_delete(common, &ps_key);
1448 an->ps_key = 0; 1485 an->ps_key = 0;
1486 an->key_idx[0] = 0;
1449} 1487}
1450 1488
1451static int ath9k_sta_remove(struct ieee80211_hw *hw, 1489static int ath9k_sta_remove(struct ieee80211_hw *hw,
@@ -1460,6 +1498,19 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
1460 return 0; 1498 return 0;
1461} 1499}
1462 1500
1501static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
1502 struct ath_node *an,
1503 bool set)
1504{
1505 int i;
1506
1507 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1508 if (!an->key_idx[i])
1509 continue;
1510 ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
1511 }
1512}
1513
1463static void ath9k_sta_notify(struct ieee80211_hw *hw, 1514static void ath9k_sta_notify(struct ieee80211_hw *hw,
1464 struct ieee80211_vif *vif, 1515 struct ieee80211_vif *vif,
1465 enum sta_notify_cmd cmd, 1516 enum sta_notify_cmd cmd,
@@ -1472,8 +1523,10 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
1472 case STA_NOTIFY_SLEEP: 1523 case STA_NOTIFY_SLEEP:
1473 an->sleeping = true; 1524 an->sleeping = true;
1474 ath_tx_aggr_sleep(sta, sc, an); 1525 ath_tx_aggr_sleep(sta, sc, an);
1526 ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
1475 break; 1527 break;
1476 case STA_NOTIFY_AWAKE: 1528 case STA_NOTIFY_AWAKE:
1529 ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
1477 an->sleeping = false; 1530 an->sleeping = false;
1478 ath_tx_aggr_wakeup(sc, an); 1531 ath_tx_aggr_wakeup(sc, an);
1479 break; 1532 break;
@@ -1529,7 +1582,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1529{ 1582{
1530 struct ath_softc *sc = hw->priv; 1583 struct ath_softc *sc = hw->priv;
1531 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1584 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1532 int ret = 0; 1585 struct ath_node *an = NULL;
1586 int ret = 0, i;
1533 1587
1534 if (ath9k_modparam_nohwcrypt) 1588 if (ath9k_modparam_nohwcrypt)
1535 return -ENOSPC; 1589 return -ENOSPC;
@@ -1551,13 +1605,16 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1551 1605
1552 mutex_lock(&sc->mutex); 1606 mutex_lock(&sc->mutex);
1553 ath9k_ps_wakeup(sc); 1607 ath9k_ps_wakeup(sc);
1554 ath_dbg(common, CONFIG, "Set HW Key\n"); 1608 ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
1609 if (sta)
1610 an = (struct ath_node *)sta->drv_priv;
1555 1611
1556 switch (cmd) { 1612 switch (cmd) {
1557 case SET_KEY: 1613 case SET_KEY:
1558 if (sta) 1614 if (sta)
1559 ath9k_del_ps_key(sc, vif, sta); 1615 ath9k_del_ps_key(sc, vif, sta);
1560 1616
1617 key->hw_key_idx = 0;
1561 ret = ath_key_config(common, vif, sta, key); 1618 ret = ath_key_config(common, vif, sta, key);
1562 if (ret >= 0) { 1619 if (ret >= 0) {
1563 key->hw_key_idx = ret; 1620 key->hw_key_idx = ret;
@@ -1570,9 +1627,27 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1570 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; 1627 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1571 ret = 0; 1628 ret = 0;
1572 } 1629 }
1630 if (an && key->hw_key_idx) {
1631 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1632 if (an->key_idx[i])
1633 continue;
1634 an->key_idx[i] = key->hw_key_idx;
1635 break;
1636 }
1637 WARN_ON(i == ARRAY_SIZE(an->key_idx));
1638 }
1573 break; 1639 break;
1574 case DISABLE_KEY: 1640 case DISABLE_KEY:
1575 ath_key_delete(common, key); 1641 ath_key_delete(common, key);
1642 if (an) {
1643 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1644 if (an->key_idx[i] != key->hw_key_idx)
1645 continue;
1646 an->key_idx[i] = 0;
1647 break;
1648 }
1649 }
1650 key->hw_key_idx = 0;
1576 break; 1651 break;
1577 default: 1652 default:
1578 ret = -EINVAL; 1653 ret = -EINVAL;
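The SET_KEY branch records each hardware key index in the first free slot of an->key_idx[], and DISABLE_KEY clears it again; index 0 doubles as the free-slot marker, which is safe because hw_key_idx is reset to 0 before ath_key_config() and a zero index is never stored. A standalone model of that slot bookkeeping:

#include <stdio.h>

#define MAX_KEYS 4

/* 0 means "slot free", mirroring how the hunk above treats key_idx[]. */
static int add_key(int key_idx[MAX_KEYS], int hw_idx)
{
	int i;

	for (i = 0; i < MAX_KEYS; i++) {
		if (key_idx[i])
			continue;
		key_idx[i] = hw_idx;
		return 0;
	}
	return -1;	/* table full: the kernel WARN_ON()s here */
}

static void del_key(int key_idx[MAX_KEYS], int hw_idx)
{
	int i;

	for (i = 0; i < MAX_KEYS; i++) {
		if (key_idx[i] != hw_idx)
			continue;
		key_idx[i] = 0;
		break;
	}
}

int main(void)
{
	int keys[MAX_KEYS] = { 0 };

	add_key(keys, 5);
	add_key(keys, 9);
	del_key(keys, 5);
	add_key(keys, 12);	/* reuses the freed slot */
	printf("slots: %d %d %d %d\n", keys[0], keys[1], keys[2], keys[3]);
	return 0;
}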
@@ -1636,6 +1711,66 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1636 ath9k_set_assoc_state(sc, vif); 1711 ath9k_set_assoc_state(sc, vif);
1637} 1712}
1638 1713
1714void ath9k_p2p_ps_timer(void *priv)
1715{
1716 struct ath_softc *sc = priv;
1717 struct ath_vif *avp = sc->p2p_ps_vif;
1718 struct ieee80211_vif *vif;
1719 struct ieee80211_sta *sta;
1720 struct ath_node *an;
1721 u32 tsf;
1722
1723 if (!avp)
1724 return;
1725
1726 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1727 if (!avp->noa.absent)
1728 tsf += ATH_P2P_PS_STOP_TIME;
1729
1730 if (!avp->noa.has_next_tsf ||
1731 avp->noa.next_tsf - tsf > BIT(31))
1732 ieee80211_update_p2p_noa(&avp->noa, tsf);
1733
1734 ath9k_update_p2p_ps_timer(sc, avp);
1735
1736 rcu_read_lock();
1737
1738 vif = avp->vif;
1739 sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
1740 if (!sta)
1741 goto out;
1742
1743 an = (void *) sta->drv_priv;
1744 if (an->sleeping == !!avp->noa.absent)
1745 goto out;
1746
1747 an->sleeping = avp->noa.absent;
1748 if (an->sleeping)
1749 ath_tx_aggr_sleep(sta, sc, an);
1750 else
1751 ath_tx_aggr_wakeup(sc, an);
1752
1753out:
1754 rcu_read_unlock();
1755}
1756
1757void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
1758{
1759 struct ath_vif *avp = (void *)vif->drv_priv;
1760 u32 tsf;
1761
1762 if (!sc->p2p_ps_timer)
1763 return;
1764
1765 if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
1766 return;
1767
1768 sc->p2p_ps_vif = avp;
1769 tsf = ath9k_hw_gettsf32(sc->sc_ah);
1770 ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
1771 ath9k_update_p2p_ps_timer(sc, avp);
1772}
1773
1639static void ath9k_bss_info_changed(struct ieee80211_hw *hw, 1774static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1640 struct ieee80211_vif *vif, 1775 struct ieee80211_vif *vif,
1641 struct ieee80211_bss_conf *bss_conf, 1776 struct ieee80211_bss_conf *bss_conf,
@@ -1650,6 +1785,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1650 struct ath_hw *ah = sc->sc_ah; 1785 struct ath_hw *ah = sc->sc_ah;
1651 struct ath_common *common = ath9k_hw_common(ah); 1786 struct ath_common *common = ath9k_hw_common(ah);
1652 struct ath_vif *avp = (void *)vif->drv_priv; 1787 struct ath_vif *avp = (void *)vif->drv_priv;
1788 unsigned long flags;
1653 int slottime; 1789 int slottime;
1654 1790
1655 ath9k_ps_wakeup(sc); 1791 ath9k_ps_wakeup(sc);
@@ -1710,6 +1846,15 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1710 } 1846 }
1711 } 1847 }
1712 1848
1849 if (changed & BSS_CHANGED_P2P_PS) {
1850 spin_lock_bh(&sc->sc_pcu_lock);
1851 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1852 if (!(sc->ps_flags & PS_BEACON_SYNC))
1853 ath9k_update_p2p_ps(sc, vif);
1854 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1855 spin_unlock_bh(&sc->sc_pcu_lock);
1856 }
1857
1713 if (changed & CHECK_ANI) 1858 if (changed & CHECK_ANI)
1714 ath_check_ani(sc); 1859 ath_check_ani(sc);
1715 1860
@@ -1883,7 +2028,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
1883 return !!npend; 2028 return !!npend;
1884} 2029}
1885 2030
1886static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 2031static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2032 u32 queues, bool drop)
1887{ 2033{
1888 struct ath_softc *sc = hw->priv; 2034 struct ath_softc *sc = hw->priv;
1889 struct ath_hw *ah = sc->sc_ah; 2035 struct ath_hw *ah = sc->sc_ah;
@@ -2084,14 +2230,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2084 clear_bit(ATH_OP_SCANNING, &common->op_flags); 2230 clear_bit(ATH_OP_SCANNING, &common->op_flags);
2085} 2231}
2086 2232
2087static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
2088 struct ieee80211_vif *vif,
2089 struct cfg80211_chan_def *chandef)
2090{
2091 /* depend on vif->csa_active only */
2092 return;
2093}
2094
2095struct ieee80211_ops ath9k_ops = { 2233struct ieee80211_ops ath9k_ops = {
2096 .tx = ath9k_tx, 2234 .tx = ath9k_tx,
2097 .start = ath9k_start, 2235 .start = ath9k_start,
@@ -2139,5 +2277,4 @@ struct ieee80211_ops ath9k_ops = {
2139#endif 2277#endif
2140 .sw_scan_start = ath9k_sw_scan_start, 2278 .sw_scan_start = ath9k_sw_scan_start,
2141 .sw_scan_complete = ath9k_sw_scan_complete, 2279 .sw_scan_complete = ath9k_sw_scan_complete,
2142 .channel_switch_beacon = ath9k_channel_switch_beacon,
2143}; 2280};
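
The `avp->noa.next_tsf - tsf > BIT(31)` test in ath9k_p2p_ps_timer() above leans on
unsigned 32-bit wraparound: the TSF is a free-running counter, so a forward difference
that lands in the upper half of the u32 range means the scheduled time is already in
the past. A minimal standalone sketch of the same comparison, in plain C outside the
driver (the helper name is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the driver's wraparound test: with u32
     * arithmetic, (next - now) > 2^31 means "next" is behind "now" on the
     * circular timeline, i.e. the scheduled TSF already passed. */
    static int tsf_in_past(uint32_t next_tsf, uint32_t now)
    {
        return (uint32_t)(next_tsf - now) > (1u << 31);
    }

    int main(void)
    {
        printf("%d\n", tsf_in_past(100, 200));       /* 1: already passed */
        printf("%d\n", tsf_in_past(200, 100));       /* 0: still ahead */
        printf("%d\n", tsf_in_past(5, 0xfffffff0u)); /* 0: ahead across the wrap */
        return 0;
    }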
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 914dbc6b1720..4dec09e565ed 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -686,7 +686,7 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 	struct ath_softc *sc = (struct ath_softc *) common->priv;
 	struct ath9k_platform_data *pdata = sc->dev->platform_data;
 
-	if (pdata) {
+	if (pdata && !pdata->use_eeprom) {
 		if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
 			ath_err(common,
 				"%s: eeprom read failed, offset %08x is out of range\n",
@@ -914,6 +914,7 @@ static int ath_pci_suspend(struct device *device)
 	 */
 	ath9k_stop_btcoex(sc);
 	ath9k_hw_disable(sc->sc_ah);
+	del_timer_sync(&sc->sleep_timer);
 	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
 
 	return 0;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 19df969ec909..9105a92364f7 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,7 +34,8 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
  * buffer (or rx fifo). This can incorrectly acknowledge packets
  * to a sender if last desc is self-linked.
  */
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
+			    bool flush)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -59,18 +60,19 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
 			   common->rx_bufsize,
 			   0);
 
-	if (sc->rx.rxlink == NULL)
-		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
-	else
+	if (sc->rx.rxlink)
 		*sc->rx.rxlink = bf->bf_daddr;
+	else if (!flush)
+		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
 
 	sc->rx.rxlink = &ds->ds_link;
 }
 
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
+			      bool flush)
 {
 	if (sc->rx.buf_hold)
-		ath_rx_buf_link(sc, sc->rx.buf_hold);
+		ath_rx_buf_link(sc, sc->rx.buf_hold, flush);
 
 	sc->rx.buf_hold = bf;
 }
@@ -442,7 +444,7 @@ int ath_startrecv(struct ath_softc *sc)
 	sc->rx.buf_hold = NULL;
 	sc->rx.rxlink = NULL;
 	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
-		ath_rx_buf_link(sc, bf);
+		ath_rx_buf_link(sc, bf, false);
 	}
 
 	/* We could have deleted elements so the list may be empty now */
@@ -538,7 +540,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
 		sc->ps_flags &= ~PS_BEACON_SYNC;
 		ath_dbg(common, PS,
 			"Reconfigure beacon timers based on synchronized timestamp\n");
-		ath9k_set_beacon(sc);
+		if (!(WARN_ON_ONCE(sc->cur_beacon_conf.beacon_interval == 0)))
+			ath9k_set_beacon(sc);
+		if (sc->p2p_ps_vif)
+			ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
 	}
 
 	if (ath_beacon_dtim_pending_cab(skb)) {
@@ -1115,12 +1120,12 @@ requeue_drop_frag:
 requeue:
 	list_add_tail(&bf->list, &sc->rx.rxbuf);
 
-	if (edma) {
-		ath_rx_edma_buf_link(sc, qtype);
-	} else {
-		ath_rx_buf_relink(sc, bf);
+	if (!edma) {
+		ath_rx_buf_relink(sc, bf, flush);
 		if (!flush)
 			ath9k_hw_rxena(ah);
+	} else if (!flush) {
+		ath_rx_edma_buf_link(sc, qtype);
 	}
 
 	if (!budget--)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index b1fd3fa84983..f1bbce3f7774 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -505,9 +505,6 @@
 #define AR_D_QCUMASK 0x000003FF
 #define AR_D_QCUMASK_RESV0 0xFFFFFC00
 
-#define AR_D_TXBLK_CMD 0x1038
-#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
-
 #define AR_D0_LCL_IFS 0x1040
 #define AR_D1_LCL_IFS 0x1044
 #define AR_D2_LCL_IFS 0x1048
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4c8cdb097b65..f8ded84b7be8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1707,7 +1707,9 @@ found:
 	return 0;
 }
 
-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void carl9170_op_flush(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      u32 queues, bool drop)
 {
 	struct ar9170 *ar = hw->priv;
 	unsigned int vid;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index ca115f33746f..f35c7f30f9a6 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1076,8 +1076,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
 
 	carl9170_set_state(ar, CARL9170_STOPPED);
 
-	return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+	err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
 		&ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+	if (err) {
+		usb_put_dev(udev);
+		usb_put_dev(udev);
+		carl9170_free(ar);
+	}
+	return err;
 }
 
 static void carl9170_usb_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index a1a69c5db409..650be79c7ac9 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -73,9 +73,52 @@ static const struct radar_types etsi_radar_types_v15 = {
 	.radar_types = etsi_radar_ref_types_v15,
 };
 
-/* for now, we support ETSI radar types, FCC and JP are TODO */
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+{ \
+	ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+	PMIN - PRI_TOLERANCE, \
+	PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
+	PPB_THRESH(PPB), PRI_TOLERANCE, \
+}
+
+static const struct radar_detector_specs fcc_radar_ref_types[] = {
+	FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+	FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
+	FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
+	FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
+	FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1),
+	FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types fcc_radar_types = {
+	.region = NL80211_DFS_FCC,
+	.num_radar_types = ARRAY_SIZE(fcc_radar_ref_types),
+	.radar_types = fcc_radar_ref_types,
+};
+
+#define JP_PATTERN FCC_PATTERN
+static const struct radar_detector_specs jp_radar_ref_types[] = {
+	JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+	JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
+	JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
+	JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
+	JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
+	JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
+	JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
+	JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1),
+	JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types jp_radar_types = {
+	.region = NL80211_DFS_JP,
+	.num_radar_types = ARRAY_SIZE(jp_radar_ref_types),
+	.radar_types = jp_radar_ref_types,
+};
+
 static const struct radar_types *dfs_domains[] = {
 	&etsi_radar_types_v15,
+	&fcc_radar_types,
+	&jp_radar_types,
 };
 
 /**
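
The FCC_PATTERN/JP_PATTERN entries above fold the PRI tolerance arithmetic into the
table at compile time: the stored bounds are PMIN - PRI_TOLERANCE and
PMAX * PRF + PRI_TOLERANCE, and the pulses-per-burst figure is scaled by the number
of interleaved pulse trains. A small sketch of that arithmetic; the PRI_TOLERANCE
value and the field names here are plausible stand-ins for illustration, not the
header's actual definitions:

    #include <stdio.h>

    #define PRI_TOLERANCE 16 /* assumed value, for illustration only */

    struct pattern_bounds {
        unsigned pri_min, pri_max, ppb;
    };

    /* Mirrors what the table macro computes from one pattern row. */
    static struct pattern_bounds expand(unsigned pmin, unsigned pmax,
                                        unsigned prf, unsigned ppb)
    {
        struct pattern_bounds b = {
            .pri_min = pmin - PRI_TOLERANCE,
            .pri_max = pmax * prf + PRI_TOLERANCE,
            .ppb = ppb * prf,
        };
        return b;
    }

    int main(void)
    {
        /* FCC type 4: PRI 1000-2000, 20 interleaved trains, 1 pulse each */
        struct pattern_bounds b = expand(1000, 2000, 20, 1);
        printf("pri [%u..%u], pulses %u\n", b.pri_min, b.pri_max, b.ppb);
        return 0;
    }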
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 7bf0ef8a1f56..63986931829e 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2068,7 +2068,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
 		if (!msg_ind)
 			goto nomem;
 		msg_ind->msg_len = len;
-		msg_ind->msg = kmalloc(len, GFP_KERNEL);
+		msg_ind->msg = kmemdup(buf, len, GFP_KERNEL);
 		if (!msg_ind->msg) {
 			kfree(msg_ind);
 nomem:
@@ -2080,7 +2080,6 @@ nomem:
 				    msg_header->msg_type);
 			break;
 		}
-		memcpy(msg_ind->msg, buf, len);
 		mutex_lock(&wcn->hal_ind_mutex);
 		list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
 		queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 4806a49cb61b..820d4ebd9322 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -172,7 +172,7 @@ static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
 
 static int wil_cfg80211_get_station(struct wiphy *wiphy,
 				    struct net_device *ndev,
-				    u8 *mac, struct station_info *sinfo)
+				    const u8 *mac, struct station_info *sinfo)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	int rc;
@@ -288,6 +288,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
 	}
 
 	wil->scan_request = request;
+	mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.cmd.num_channels = 0;
@@ -671,7 +672,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
 }
 
 static int wil_cfg80211_del_station(struct wiphy *wiphy,
-				    struct net_device *dev, u8 *mac)
+				    struct net_device *dev, const u8 *mac)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ecdabe4adec3..8d4bc4bfb664 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -35,7 +35,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
 	void __iomem *x = wmi_addr(wil, vring->hwtail);
 
 	seq_printf(s, "VRING %s = {\n", name);
-	seq_printf(s, "  pa = 0x%016llx\n", (unsigned long long)vring->pa);
+	seq_printf(s, "  pa = %pad\n", &vring->pa);
 	seq_printf(s, "  va = 0x%p\n", vring->va);
 	seq_printf(s, "  size = %d\n", vring->size);
 	seq_printf(s, "  swtail = %d\n", vring->swtail);
@@ -473,7 +473,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
 		   u[0], u[1], u[2], u[3]);
 	seq_printf(s, "  DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
 		   u[4], u[5], u[6], u[7]);
-	seq_printf(s, "  SKB = %p\n", skb);
+	seq_printf(s, "  SKB = 0x%p\n", skb);
 
 	if (skb) {
 		skb_get(skb);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5824cd41e4ba..73593aa3cd98 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
 	}
 
 	if (isr)
-		wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+		wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
 
 	wil->isr_misc = 0;
 
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 95f4efe9ef37..11e6d9d22eae 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -81,7 +81,7 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
 	memset(&sta->stats, 0, sizeof(sta->stats));
 }
 
-static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
 {
 	int cid = -ENOENT;
 	struct net_device *ndev = wil_to_ndev(wil);
@@ -150,6 +150,15 @@ static void wil_connect_timer_fn(ulong x)
 	schedule_work(&wil->disconnect_worker);
 }
 
+static void wil_scan_timer_fn(ulong x)
+{
+	struct wil6210_priv *wil = (void *)x;
+
+	clear_bit(wil_status_fwready, &wil->status);
+	wil_err(wil, "Scan timeout detected, start fw error recovery\n");
+	schedule_work(&wil->fw_error_worker);
+}
+
 static void wil_fw_error_worker(struct work_struct *work)
 {
 	struct wil6210_priv *wil = container_of(work,
@@ -161,12 +170,30 @@ static void wil_fw_error_worker(struct work_struct *work)
 	if (no_fw_recovery)
 		return;
 
+	/* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
+	 * passed since the last recovery attempt
+	 */
+	if (time_is_after_jiffies(wil->last_fw_recovery +
+				  WIL6210_FW_RECOVERY_TO))
+		wil->recovery_count++;
+	else
+		wil->recovery_count = 1; /* fw was alive for a long time */
+
+	if (wil->recovery_count > WIL6210_FW_RECOVERY_RETRIES) {
+		wil_err(wil, "too many recovery attempts (%d), giving up\n",
+			wil->recovery_count);
+		return;
+	}
+
+	wil->last_fw_recovery = jiffies;
+
 	mutex_lock(&wil->mutex);
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_MONITOR:
-		wil_info(wil, "fw error recovery started...\n");
+		wil_info(wil, "fw error recovery started (try %d)...\n",
+			 wil->recovery_count);
 		wil_reset(wil);
 
 		/* need to re-allocate Rx ring after reset */
@@ -230,6 +257,7 @@ int wil_priv_init(struct wil6210_priv *wil)
 
 	wil->pending_connect_cid = -1;
 	setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+	setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
 
 	INIT_WORK(&wil->connect_worker, wil_connect_worker);
 	INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
@@ -249,10 +277,12 @@ int wil_priv_init(struct wil6210_priv *wil)
 		return -EAGAIN;
 	}
 
+	wil->last_fw_recovery = jiffies;
+
 	return 0;
 }
 
-void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
 {
 	del_timer_sync(&wil->connect_timer);
 	_wil6210_disconnect(wil, bssid);
@@ -260,6 +290,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
 
 void wil_priv_deinit(struct wil6210_priv *wil)
 {
+	del_timer_sync(&wil->scan_timer);
 	cancel_work_sync(&wil->disconnect_worker);
 	cancel_work_sync(&wil->fw_error_worker);
 	mutex_lock(&wil->mutex);
@@ -363,8 +394,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 		wil_err(wil, "Firmware not ready\n");
 		return -ETIME;
 	} else {
-		wil_dbg_misc(wil, "FW ready after %d ms\n",
-			     jiffies_to_msecs(to-left));
+		wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+			 jiffies_to_msecs(to-left), wil->hw_version);
 	}
 	return 0;
 }
@@ -391,6 +422,7 @@ int wil_reset(struct wil6210_priv *wil)
 	if (wil->scan_request) {
 		wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
 			     wil->scan_request);
+		del_timer_sync(&wil->scan_timer);
 		cfg80211_scan_done(wil->scan_request, true);
 		wil->scan_request = NULL;
 	}
@@ -520,6 +552,7 @@ static int __wil_down(struct wil6210_priv *wil)
 	napi_disable(&wil->napi_tx);
 
 	if (wil->scan_request) {
+		del_timer_sync(&wil->scan_timer);
 		cfg80211_scan_done(wil->scan_request, true);
 		wil->scan_request = NULL;
 	}
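
The throttle added to wil_fw_error_worker() above counts only recovery attempts that
arrive within WIL6210_FW_RECOVERY_TO of the previous one, resets the counter when the
firmware stayed up longer, and gives up after WIL6210_FW_RECOVERY_RETRIES rapid tries.
A toy model of the same policy (plain integers stand in for jiffies; the names are
illustrative, not the driver's API):

    #include <stdio.h>

    #define RECOVERY_TO      5000
    #define RECOVERY_RETRIES 5

    static long last_recovery;
    static int recovery_count;

    /* Returns 0 when a recovery attempt is allowed, -1 when giving up. */
    static int try_recover(long now)
    {
        if (now < last_recovery + RECOVERY_TO)
            recovery_count++;   /* another crash soon after recovery */
        else
            recovery_count = 1; /* fw was alive for a long time */

        if (recovery_count > RECOVERY_RETRIES)
            return -1;

        last_recovery = now;
        return 0;
    }

    int main(void)
    {
        long t;

        for (t = 0; t < 700; t += 100) /* a burst of rapid crashes */
            printf("t=%ld -> %d\n", t, try_recover(t));
        return 0;
    }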
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index fdcaeb820e75..106b6dcb773a 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -32,12 +32,26 @@ static int wil_stop(struct net_device *ndev)
 	return wil_down(wil);
 }
 
+static int wil_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+	if (new_mtu < 68 || new_mtu > IEEE80211_MAX_DATA_LEN_DMG)
+		return -EINVAL;
+
+	wil_dbg_misc(wil, "change MTU %d -> %d\n", ndev->mtu, new_mtu);
+	ndev->mtu = new_mtu;
+
+	return 0;
+}
+
 static const struct net_device_ops wil_netdev_ops = {
 	.ndo_open = wil_open,
 	.ndo_stop = wil_stop,
 	.ndo_start_xmit = wil_start_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_change_mtu = wil_change_mtu,
 };
 
 static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index f1e1bb338d68..1e2e07b9d13d 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
 	if (rc)
 		goto release_irq;
 
-	wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
-
 	return 0;
 
  release_irq:
@@ -140,7 +138,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_release_reg;
 	}
 	/* rollback to err_iounmap */
-	dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+	dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);
 
 	wil = wil_if_alloc(dev, csr);
 	if (IS_ERR(wil)) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index d04629fe053f..747ae1275877 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -49,10 +49,17 @@ static void wil_release_reorder_frames(struct wil6210_priv *wil,
 {
 	int index;
 
-	while (seq_less(r->head_seq_num, hseq)) {
+	/* note: this function is never called with
+	 * hseq preceding r->head_seq_num, i.e. it is always true that
+	 * !seq_less(hseq, r->head_seq_num),
+	 * and thus on loop exit it should be
+	 * r->head_seq_num == hseq
+	 */
+	while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
 		index = reorder_index(r, r->head_seq_num);
 		wil_release_reorder_frame(wil, r, index);
 	}
+	r->head_seq_num = hseq;
 }
 
 static void wil_reorder_release(struct wil6210_priv *wil,
@@ -91,6 +98,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
 
 	spin_lock(&r->reorder_lock);
 
+	/* Due to a race between WMI events (in which BACK establishment is
+	 * reported) and data Rx, a few packets may be passed up before the
+	 * reorder buffer gets allocated. Catch up by pretending the SSN is
+	 * what we see in the first Rx packet.
+	 */
+	if (r->first_time) {
+		r->first_time = false;
+		if (seq != r->head_seq_num) {
+			wil_err(wil, "Error: 1-st frame with wrong sequence"
+				" %d, should be %d. Fixing...\n", seq,
+				r->head_seq_num);
+			r->head_seq_num = seq;
+			r->ssn = seq;
+		}
+	}
+
 	/* frame with out of date sequence number */
 	if (seq_less(seq, r->head_seq_num)) {
 		dev_kfree_skb(skb);
@@ -162,6 +185,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
 	r->head_seq_num = ssn;
 	r->buf_size = size;
 	r->stored_mpdu_num = 0;
+	r->first_time = true;
 	return r;
 }
 
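
The release loop above depends on seq_less(), the circular ordering over the 12-bit
802.11 sequence space: a precedes b when the forward distance (a - b) mod 4096
exceeds half the space. A self-contained sketch of that convention (the driver's own
definition is assumed to match the usual mac80211 one):

    #include <stdio.h>

    #define SEQ_MODULO 0x1000 /* 12-bit sequence numbers wrap at 4096 */
    #define SEQ_MASK   0xfff

    static int seq_less(unsigned a, unsigned b)
    {
        return ((a - b) & SEQ_MASK) > (SEQ_MODULO >> 1);
    }

    int main(void)
    {
        printf("%d\n", seq_less(10, 20));  /* 1 */
        printf("%d\n", seq_less(20, 10));  /* 0 */
        printf("%d\n", seq_less(4090, 5)); /* 1: 4090 precedes 5 across the wrap */
        return 0;
    }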
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index c8c547457eb4..0784ef3d4ce2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -64,6 +64,22 @@ static inline int wil_vring_avail_tx(struct vring *vring)
 	return vring->size - used - 1;
 }
 
+/**
+ * wil_vring_wmark_low - low watermark for available descriptor space
+ */
+static inline int wil_vring_wmark_low(struct vring *vring)
+{
+	return vring->size/8;
+}
+
+/**
+ * wil_vring_wmark_high - high watermark for available descriptor space
+ */
+static inline int wil_vring_wmark_high(struct vring *vring)
+{
+	return vring->size/4;
+}
+
 static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 {
 	struct device *dev = wil_to_dev(wil);
@@ -98,8 +114,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 		_d->dma.status = TX_DMA_STATUS_DU;
 	}
 
-	wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
-		     vring->va, (unsigned long long)vring->pa, vring->ctx);
+	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
+		     vring->va, &vring->pa, vring->ctx);
 
 	return 0;
 }
@@ -880,8 +896,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 	pa = dma_map_single(dev, skb->data,
 			    skb_headlen(skb), DMA_TO_DEVICE);
 
-	wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
-		     skb->data, (unsigned long long)pa);
+	wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
+		     skb->data, &pa);
 	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
 			  skb->data, skb_headlen(skb), false);
 
@@ -1007,7 +1023,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	rc = wil_tx_vring(wil, vring, skb);
 
 	/* do we still have enough room in the vring? */
-	if (wil_vring_avail_tx(vring) < vring->size/8)
+	if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))
 		netif_tx_stop_all_queues(wil_to_ndev(wil));
 
 	switch (rc) {
@@ -1116,7 +1132,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
 			done++;
 		}
 	}
-	if (wil_vring_avail_tx(vring) > vring->size/4)
+	if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring))
 		netif_tx_wake_all_queues(wil_to_ndev(wil));
 
 	return done;
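
The two watermark helpers introduced above give the Tx queue hysteresis: the queue is
stopped when fewer than size/8 descriptors remain free but woken only once size/4 are
free again, so a single completed frame cannot bounce it between stopped and awake. A
toy model of that stop/wake behaviour (illustrative names, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { int size, avail; bool stopped; };

    static void after_tx(struct ring *r)
    {
        if (r->avail < r->size / 8) /* low watermark: stop the queue */
            r->stopped = true;
    }

    static void after_completion(struct ring *r)
    {
        if (r->avail > r->size / 4) /* high watermark: safe to wake */
            r->stopped = false;
    }

    int main(void)
    {
        struct ring r = { .size = 512, .avail = 60, .stopped = false };

        after_tx(&r);         /* 60 < 64: stopped */
        r.avail = 100;
        after_completion(&r); /* 100 <= 128: still stopped */
        r.avail = 140;
        after_completion(&r); /* 140 > 128: woken */
        printf("stopped=%d\n", r.stopped);
        return 0;
    }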
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 2a2dec75f026..e25edc52398f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -35,11 +35,14 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL6210_MEM_SIZE (2*1024*1024UL)
 
 #define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (512)
 #define WIL6210_MAX_TX_RINGS (24) /* HW limit */
 #define WIL6210_MAX_CID (8) /* HW limit */
 #define WIL6210_NAPI_BUDGET (16) /* arbitrary */
 #define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */
+#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
+#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
+#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
 
 /* Hardware definitions begin */
 
@@ -301,6 +304,7 @@ struct wil_tid_ampdu_rx {
 	u16 buf_size;
 	u16 timeout;
 	u8 dialog_token;
+	bool first_time; /* is this the first time the buffer is used? */
 };
 
 struct wil6210_stats {
306struct wil6210_stats { 310struct wil6210_stats {
@@ -360,6 +364,8 @@ struct wil6210_priv {
360 u32 fw_version; 364 u32 fw_version;
361 u32 hw_version; 365 u32 hw_version;
362 u8 n_mids; /* number of additional MIDs as reported by FW */ 366 u8 n_mids; /* number of additional MIDs as reported by FW */
367 int recovery_count; /* num of FW recovery attempts in a short time */
368 unsigned long last_fw_recovery; /* jiffies of last fw recovery */
363 /* profile */ 369 /* profile */
364 u32 monitor_flags; 370 u32 monitor_flags;
365 u32 secure_pcp; /* create secure PCP? */ 371 u32 secure_pcp; /* create secure PCP? */
@@ -381,6 +387,7 @@ struct wil6210_priv {
381 struct work_struct disconnect_worker; 387 struct work_struct disconnect_worker;
382 struct work_struct fw_error_worker; /* for FW error recovery */ 388 struct work_struct fw_error_worker; /* for FW error recovery */
383 struct timer_list connect_timer; 389 struct timer_list connect_timer;
390 struct timer_list scan_timer; /* detect scan timeout */
384 int pending_connect_cid; 391 int pending_connect_cid;
385 struct list_head pending_wmi_ev; 392 struct list_head pending_wmi_ev;
386 /* 393 /*
@@ -507,7 +514,7 @@ void wil_wdev_free(struct wil6210_priv *wil);
507int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); 514int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
508int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan); 515int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
509int wmi_pcp_stop(struct wil6210_priv *wil); 516int wmi_pcp_stop(struct wil6210_priv *wil);
510void wil6210_disconnect(struct wil6210_priv *wil, void *bssid); 517void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid);
511 518
512int wil_rx_init(struct wil6210_priv *wil); 519int wil_rx_init(struct wil6210_priv *wil);
513void wil_rx_fini(struct wil6210_priv *wil); 520void wil_rx_fini(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2ba56eef0c45..6cc0e182cc70 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 	might_sleep();
 
 	if (!test_bit(wil_status_fwready, &wil->status)) {
-		wil_err(wil, "FW not ready\n");
+		wil_err(wil, "WMI: cannot send command while FW not ready\n");
 		return -EAGAIN;
 	}
 
@@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 	wil->fw_version = le32_to_cpu(evt->sw_version);
 	wil->n_mids = evt->numof_additional_mids;
 
-	wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
-		    evt->mac, wil->n_mids);
+	wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+		 evt->mac, wil->n_mids);
 
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
 		memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
 			     int len)
 {
-	wil_dbg_wmi(wil, "WMI: FW ready\n");
+	wil_dbg_wmi(wil, "WMI: got FW ready event\n");
 
 	set_bit(wil_status_fwready, &wil->status);
 	/* reuse wmi_ready for the firmware ready indication */
@@ -348,9 +348,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
 {
 	if (wil->scan_request) {
 		struct wmi_scan_complete_event *data = d;
-		bool aborted = (data->status != 0);
+		bool aborted = (data->status != WMI_SCAN_SUCCESS);
 
 		wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+		del_timer_sync(&wil->scan_timer);
 		cfg80211_scan_done(wil->scan_request, aborted);
 		wil->scan_request = NULL;
 	} else {
@@ -658,21 +659,27 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
 	u8 *cmd;
 	void __iomem *src;
 	ulong flags;
+	unsigned n;
 
 	if (!test_bit(wil_status_reset_done, &wil->status)) {
 		wil_err(wil, "Reset not completed\n");
 		return;
 	}
 
-	for (;;) {
+	for (n = 0;; n++) {
 		u16 len;
 
 		r->head = ioread32(wil->csr + HOST_MBOX +
 			offsetof(struct wil6210_mbox_ctl, rx.head));
-		if (r->tail == r->head)
+		if (r->tail == r->head) {
+			if (n == 0)
+				wil_dbg_wmi(wil, "No events?\n");
 			return;
+		}
 
-		/* read cmd from tail */
+		wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
+			    r->head, r->tail);
+		/* read cmd descriptor from tail */
 		wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
 				     sizeof(struct wil6210_mbox_ring_desc));
 		if (d_tail.sync == 0) {
@@ -680,13 +687,18 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
 			return;
 		}
 
+		/* read cmd header from descriptor */
 		if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
 			wil_err(wil, "Mbox evt at 0x%08x?\n",
 				le32_to_cpu(d_tail.addr));
 			return;
 		}
-
 		len = le16_to_cpu(hdr.len);
+		wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
+			    le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+			    hdr.flags);
+
+		/* read cmd buffer from descriptor */
 		src = wmi_buffer(wil, d_tail.addr) +
 			sizeof(struct wil6210_mbox_hdr);
 		evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
@@ -702,9 +714,6 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
 		iowrite32(0, wil->csr + HOSTADDR(r->tail) +
 			  offsetof(struct wil6210_mbox_ring_desc, sync));
 		/* indicate */
-		wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
-			    le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
-			    hdr.flags);
 		if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
 		    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
 			struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
@@ -734,6 +743,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
 			wil_dbg_wmi(wil, "queue_work -> %d\n", q);
 		}
 	}
+	if (n > 1)
+		wil_dbg_wmi(wil, "%s -> %d events processed\n", __func__, n);
 }
 
 int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
@@ -802,6 +813,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
 		.network_type = wmi_nettype,
 		.disable_sec_offload = 1,
 		.channel = chan - 1,
+		.pcp_max_assoc_sta = WIL6210_MAX_CID,
 	};
 	struct {
 		struct wil6210_mbox_hdr_wmi wmi;
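
The reworked wmi_recv_cmd() loop above re-reads the producer's head pointer on every
pass, drains the mailbox until the tail catches up, and counts how many events one
invocation handled. A toy consumer in the same shape (a plain array stands in for the
device mailbox ring; everything here is illustrative):

    #include <stdio.h>

    #define RING_SIZE 8

    static unsigned head; /* advanced by the "device" (producer) */
    static unsigned tail; /* advanced by the consumer */
    static int ring[RING_SIZE];

    static unsigned drain(void)
    {
        unsigned n;

        for (n = 0;; n++) {
            if (tail == head) { /* re-check the producer each pass */
                if (n == 0)
                    printf("No events?\n");
                return n;
            }
            printf("event %d\n", ring[tail % RING_SIZE]);
            tail++;
        }
    }

    int main(void)
    {
        ring[0] = 42;
        ring[1] = 43;
        head = 2;
        printf("processed %u\n", drain());
        return 0;
    }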
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 50b8528394f4..17334c852866 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -28,7 +28,7 @@
 #define __WILOCITY_WMI_H__
 
 /* General */
-
+#define WILOCITY_MAX_ASSOC_STA (8)
 #define WMI_MAC_LEN (6)
 #define WMI_PROX_RANGE_NUM (3)
 
@@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
 	__le16 disconnect_reason;
 } __packed;
 
-/*
- * WMI_RECONNECT_CMDID
- */
-struct wmi_reconnect_cmd {
-	u8 channel; /* hint */
-	u8 reserved;
-	u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
-} __packed;
-
 
 /*
  * WMI_SET_PMK_CMDID
@@ -296,11 +287,13 @@ enum wmi_scan_type {
 	WMI_LONG_SCAN = 0,
 	WMI_SHORT_SCAN = 1,
 	WMI_PBC_SCAN = 2,
+	WMI_ACTIVE_SCAN = 3,
+	WMI_DIRECT_SCAN = 4,
 };
 
 struct wmi_start_scan_cmd {
-	u8 reserved[8];
-
+	u8 direct_scan_mac_addr[6];
+	u8 reserved[2];
 	__le32 home_dwell_time; /* Max duration in the home channel(ms) */
 	__le32 force_scan_interval; /* Time interval between scans (ms)*/
 	u8 scan_type; /* wmi_scan_type */
@@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
 	u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
+
 /*
  * WMI_SET_APPIE_CMDID
  * Add Application specified IE to a management frame
@@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
 	__le16 frag_num;
 	__le64 ss_mask;
 	u8 network_type;
-	u8 reserved;
+	u8 pcp_max_assoc_sta;
 	u8 disable_sec_offload;
 	u8 disable_sec;
 } __packed;
@@ -450,7 +444,7 @@ enum wmi_port_role {
 struct wmi_port_allocate_cmd {
 	u8 mac[WMI_MAC_LEN];
 	u8 port_role;
-	u8 midid;
+	u8 mid;
 } __packed;
 
 /*
@@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
 enum wmi_discovery_mode {
 	WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
 	WMI_DISCOVERY_MODE_OFFLOAD = 1,
+	WMI_DISCOVERY_MODE_PEER2PEER = 2,
 };
 
 struct wmi_p2p_cfg_cmd {
@@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
  */
 struct wmi_pcp_start_cmd {
 	__le16 bcon_interval;
-	u8 reserved0[10];
+	u8 pcp_max_assoc_sta;
+	u8 reserved0[9];
 	u8 network_type;
 	u8 channel;
 	u8 disable_sec_offload;
@@ -857,6 +853,7 @@ enum wmi_event_id {
 	WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
 	WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
 	WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+	WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
 
 	/* Performance monitoring events */
 	WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
@@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
 struct wmi_disconnect_event {
 	__le16 protocol_reason_status; /* reason code, see 802.11 spec. */
 	u8 bssid[WMI_MAC_LEN]; /* set if known */
-	u8 disconnect_reason; /* see wmi_disconnect_reason_e */
-	u8 assoc_resp_len;
-	u8 assoc_info[0];
+	u8 disconnect_reason; /* see wmi_disconnect_reason */
+	u8 assoc_resp_len; /* not in use */
+	u8 assoc_info[0]; /* not in use */
 } __packed;
 
 /*
  * WMI_SCAN_COMPLETE_EVENTID
 */
+enum scan_status {
+	WMI_SCAN_SUCCESS = 0,
+	WMI_SCAN_FAILED = 1,
+	WMI_SCAN_ABORTED = 2,
+	WMI_SCAN_REJECTED = 3,
+};
+
 struct wmi_scan_complete_event {
-	__le32 status;
+	__le32 status; /* scan_status */
 } __packed;
 
 /*
@@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
 	u8 channel; /* From Radio MNGR */
 } __packed;
 
+
+/*
+ * WMI_TX_MGMT_PACKET_EVENTID
+ */
+struct wmi_tx_mgmt_packet_event {
+	u8 payload[0];
+} __packed;
+
 struct wmi_rx_mgmt_packet_event {
 	struct wmi_rx_mgmt_info info;
 	u8 payload[0];
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 088d544ec63f..e3f67b8d3f80 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -1,7 +1,8 @@
 config B43
 	tristate "Broadcom 43xx wireless support (mac80211 stack)"
-	depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
-	select SSB
+	depends on (BCMA_POSSIBLE || SSB_POSSIBLE) && MAC80211 && HAS_DMA
+	select BCMA if B43_BCMA
+	select SSB if B43_SSB
 	select FW_LOADER
 	---help---
 	  b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -27,14 +28,33 @@ config B43
 	  If unsure, say M.
 
 config B43_BCMA
-	bool "Support for BCMA bus"
-	depends on B43 && (BCMA = y || BCMA = B43)
-	default y
+	bool
 
 config B43_SSB
 	bool
-	depends on B43 && (SSB = y || SSB = B43)
-	default y
+
+choice
+	prompt "Supported bus types"
+	depends on B43
+	default B43_BCMA_AND_SSB
+
+config B43_BUSES_BCMA_AND_SSB
+	bool "BCMA and SSB"
+	depends on BCMA_POSSIBLE && SSB_POSSIBLE
+	select B43_BCMA
+	select B43_SSB
+
+config B43_BUSES_BCMA
+	bool "BCMA only"
+	depends on BCMA_POSSIBLE
+	select B43_BCMA
+
+config B43_BUSES_SSB
+	bool "SSB only"
+	depends on SSB_POSSIBLE
+	select B43_SSB
+
+endchoice
 
 # Auto-select SSB PCI-HOST support, if possible
 config B43_PCI_AUTOSELECT
@@ -53,7 +73,7 @@ config B43_PCICORE_AUTOSELECT
 
 config B43_PCMCIA
 	bool "Broadcom 43xx PCMCIA device support"
-	depends on B43 && SSB_PCMCIAHOST_POSSIBLE
+	depends on B43 && B43_SSB && SSB_PCMCIAHOST_POSSIBLE
 	select SSB_PCMCIAHOST
 	---help---
 	  Broadcom 43xx PCMCIA device support.
@@ -73,7 +93,7 @@ config B43_PCMCIA
 
 config B43_SDIO
 	bool "Broadcom 43xx SDIO device support"
-	depends on B43 && SSB_SDIOHOST_POSSIBLE
+	depends on B43 && B43_SSB && SSB_SDIOHOST_POSSIBLE
 	select SSB_SDIOHOST
 	---help---
 	  Broadcom 43xx device support for Soft-MAC SDIO devices.
@@ -98,7 +118,7 @@ config B43_BCMA_PIO
 
 config B43_PIO
 	bool
-	depends on B43
+	depends on B43 && B43_SSB
 	select SSB_BLOCKIO
 	default y
 
@@ -116,7 +136,7 @@ config B43_PHY_N
 
 config B43_PHY_LP
 	bool "Support for low-power (LP-PHY) devices"
-	depends on B43
+	depends on B43 && B43_SSB
 	default y
 	---help---
 	  Support for the LP-PHY.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 54376fddfaf9..4113b6934764 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -915,10 +915,6 @@ struct b43_wl {
 	char rng_name[30 + 1];
 #endif /* CONFIG_B43_HWRNG */
 
-	/* List of all wireless devices on this chip */
-	struct list_head devlist;
-	u8 nr_devs;
-
 	bool radiotap_enabled;
 	bool radio_enabled;
 
diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h
index 184c95659279..f3205c6988bc 100644
--- a/drivers/net/wireless/b43/bus.h
+++ b/drivers/net/wireless/b43/bus.h
@@ -5,7 +5,9 @@ enum b43_bus_type {
 #ifdef CONFIG_B43_BCMA
 	B43_BUS_BCMA,
 #endif
+#ifdef CONFIG_B43_SSB
 	B43_BUS_SSB,
+#endif
 };
 
 struct b43_bus_dev {
@@ -52,13 +54,21 @@ struct b43_bus_dev {
 
 static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev)
 {
+#ifdef CONFIG_B43_SSB
 	return (dev->bus_type == B43_BUS_SSB &&
 		dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA);
+#else
+	return false;
+#endif
 }
 static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev)
 {
+#ifdef CONFIG_B43_SSB
 	return (dev->bus_type == B43_BUS_SSB &&
 		dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO);
+#else
+	return false;
+#endif
 }
 
 struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 69fc3d65531a..32538ac5f7e4 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -182,7 +182,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
 #define b43_g_ratetable (__b43_ratetable + 0)
 #define b43_g_ratetable_size 12
 
-#define CHAN4G(_channel, _freq, _flags) { \
+#define CHAN2G(_channel, _freq, _flags) { \
 	.band = IEEE80211_BAND_2GHZ, \
 	.center_freq = (_freq), \
 	.hw_value = (_channel), \
@@ -191,23 +191,31 @@ static struct ieee80211_rate __b43_ratetable[] = {
 	.max_power = 30, \
 }
 static struct ieee80211_channel b43_2ghz_chantable[] = {
-	CHAN4G(1, 2412, 0),
-	CHAN4G(2, 2417, 0),
-	CHAN4G(3, 2422, 0),
-	CHAN4G(4, 2427, 0),
-	CHAN4G(5, 2432, 0),
-	CHAN4G(6, 2437, 0),
-	CHAN4G(7, 2442, 0),
-	CHAN4G(8, 2447, 0),
-	CHAN4G(9, 2452, 0),
-	CHAN4G(10, 2457, 0),
-	CHAN4G(11, 2462, 0),
-	CHAN4G(12, 2467, 0),
-	CHAN4G(13, 2472, 0),
-	CHAN4G(14, 2484, 0),
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0),
 };
-#undef CHAN4G
+#undef CHAN2G
 
+#define CHAN4G(_channel, _flags) { \
+	.band = IEEE80211_BAND_5GHZ, \
+	.center_freq = 4000 + (5 * (_channel)), \
+	.hw_value = (_channel), \
+	.flags = (_flags), \
+	.max_antenna_gain = 0, \
+	.max_power = 30, \
+}
 #define CHAN5G(_channel, _flags) { \
 	.band = IEEE80211_BAND_5GHZ, \
 	.center_freq = 5000 + (5 * (_channel)), \
@@ -217,6 +225,18 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
 	.max_power = 30, \
 }
 static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
+	CHAN4G(184, 0), CHAN4G(186, 0),
+	CHAN4G(188, 0), CHAN4G(190, 0),
+	CHAN4G(192, 0), CHAN4G(194, 0),
+	CHAN4G(196, 0), CHAN4G(198, 0),
+	CHAN4G(200, 0), CHAN4G(202, 0),
+	CHAN4G(204, 0), CHAN4G(206, 0),
+	CHAN4G(208, 0), CHAN4G(210, 0),
+	CHAN4G(212, 0), CHAN4G(214, 0),
+	CHAN4G(216, 0), CHAN4G(218, 0),
+	CHAN4G(220, 0), CHAN4G(222, 0),
+	CHAN4G(224, 0), CHAN4G(226, 0),
+	CHAN4G(228, 0),
 	CHAN5G(32, 0), CHAN5G(34, 0),
 	CHAN5G(36, 0), CHAN5G(38, 0),
 	CHAN5G(40, 0), CHAN5G(42, 0),
@@ -260,18 +280,7 @@ static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
 	CHAN5G(170, 0), CHAN5G(172, 0),
 	CHAN5G(174, 0), CHAN5G(176, 0),
 	CHAN5G(178, 0), CHAN5G(180, 0),
-	CHAN5G(182, 0), CHAN5G(184, 0),
-	CHAN5G(186, 0), CHAN5G(188, 0),
-	CHAN5G(190, 0), CHAN5G(192, 0),
-	CHAN5G(194, 0), CHAN5G(196, 0),
-	CHAN5G(198, 0), CHAN5G(200, 0),
-	CHAN5G(202, 0), CHAN5G(204, 0),
-	CHAN5G(206, 0), CHAN5G(208, 0),
-	CHAN5G(210, 0), CHAN5G(212, 0),
-	CHAN5G(214, 0), CHAN5G(216, 0),
-	CHAN5G(218, 0), CHAN5G(220, 0),
-	CHAN5G(222, 0), CHAN5G(224, 0),
-	CHAN5G(226, 0), CHAN5G(228, 0),
+	CHAN5G(182, 0),
 };
 
 static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
@@ -295,6 +304,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
 	CHAN5G(208, 0), CHAN5G(212, 0),
 	CHAN5G(216, 0),
 };
+#undef CHAN4G
 #undef CHAN5G
 
 static struct ieee80211_supported_band b43_band_5GHz_nphy = {
@@ -1175,18 +1185,7 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
 	bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
 	udelay(2);
 
-	/* Take PHY out of reset */
-	flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
-	flags &= ~B43_BCMA_IOCTL_PHY_RESET;
-	flags |= BCMA_IOCTL_FGC;
-	bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
-	udelay(1);
-
-	/* Do not force clock anymore */
-	flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
-	flags &= ~BCMA_IOCTL_FGC;
-	bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
-	udelay(1);
+	b43_phy_take_out_of_reset(dev);
 }
 
 static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
@@ -1195,18 +1194,22 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 		  B43_BCMA_CLKCTLST_PHY_PLL_REQ;
 	u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
 		  B43_BCMA_CLKCTLST_PHY_PLL_ST;
+	u32 flags;
+
+	flags = B43_BCMA_IOCTL_PHY_CLKEN;
+	if (gmode)
+		flags |= B43_BCMA_IOCTL_GMODE;
+	b43_device_enable(dev, flags);
 
-	b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
 	bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
 	b43_bcma_phy_reset(dev);
 	bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
 }
 #endif
 
+#ifdef CONFIG_B43_SSB
 static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 {
-	struct ssb_device *sdev = dev->dev->sdev;
-	u32 tmslow;
 	u32 flags = 0;
 
 	if (gmode)
@@ -1218,18 +1221,9 @@ static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 	b43_device_enable(dev, flags);
 	msleep(2); /* Wait for the PLL to turn on. */
 
-	/* Now take the PHY out of Reset again */
-	tmslow = ssb_read32(sdev, SSB_TMSLOW);
-	tmslow |= SSB_TMSLOW_FGC;
-	tmslow &= ~B43_TMSLOW_PHYRESET;
-	ssb_write32(sdev, SSB_TMSLOW, tmslow);
-	ssb_read32(sdev, SSB_TMSLOW); /* flush */
-	msleep(1);
-	tmslow &= ~SSB_TMSLOW_FGC;
-	ssb_write32(sdev, SSB_TMSLOW, tmslow);
-	ssb_read32(sdev, SSB_TMSLOW); /* flush */
-	msleep(1);
+	b43_phy_take_out_of_reset(dev);
 }
+#endif
 
 void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 {
@@ -2704,32 +2698,37 @@ static int b43_upload_initvals(struct b43_wldev *dev)
2704 struct b43_firmware *fw = &dev->fw; 2698 struct b43_firmware *fw = &dev->fw;
2705 const struct b43_iv *ivals; 2699 const struct b43_iv *ivals;
2706 size_t count; 2700 size_t count;
2707 int err;
2708 2701
2709 hdr = (const struct b43_fw_header *)(fw->initvals.data->data); 2702 hdr = (const struct b43_fw_header *)(fw->initvals.data->data);
2710 ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len); 2703 ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len);
2711 count = be32_to_cpu(hdr->size); 2704 count = be32_to_cpu(hdr->size);
2712 err = b43_write_initvals(dev, ivals, count, 2705 return b43_write_initvals(dev, ivals, count,
2713 fw->initvals.data->size - hdr_len); 2706 fw->initvals.data->size - hdr_len);
2714 if (err) 2707}
2715 goto out;
2716 if (fw->initvals_band.data) {
2717 hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
2718 ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
2719 count = be32_to_cpu(hdr->size);
2720 err = b43_write_initvals(dev, ivals, count,
2721 fw->initvals_band.data->size - hdr_len);
2722 if (err)
2723 goto out;
2724 }
2725out:
2726 2708
2727 return err; 2709static int b43_upload_initvals_band(struct b43_wldev *dev)
2710{
2711 const size_t hdr_len = sizeof(struct b43_fw_header);
2712 const struct b43_fw_header *hdr;
2713 struct b43_firmware *fw = &dev->fw;
2714 const struct b43_iv *ivals;
2715 size_t count;
2716
2717 if (!fw->initvals_band.data)
2718 return 0;
2719
2720 hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
2721 ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
2722 count = be32_to_cpu(hdr->size);
2723 return b43_write_initvals(dev, ivals, count,
2724 fw->initvals_band.data->size - hdr_len);
2728} 2725}
2729 2726
2730/* Initialize the GPIOs 2727/* Initialize the GPIOs
2731 * http://bcm-specs.sipsolutions.net/GPIO 2728 * http://bcm-specs.sipsolutions.net/GPIO
2732 */ 2729 */
2730
2731#ifdef CONFIG_B43_SSB
2733static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev) 2732static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
2734{ 2733{
2735 struct ssb_bus *bus = dev->dev->sdev->bus; 2734 struct ssb_bus *bus = dev->dev->sdev->bus;
@@ -2740,10 +2739,13 @@ static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
2740 return bus->chipco.dev; 2739 return bus->chipco.dev;
2741#endif 2740#endif
2742} 2741}
2742#endif
2743 2743
2744static int b43_gpio_init(struct b43_wldev *dev) 2744static int b43_gpio_init(struct b43_wldev *dev)
2745{ 2745{
2746#ifdef CONFIG_B43_SSB
2746 struct ssb_device *gpiodev; 2747 struct ssb_device *gpiodev;
2748#endif
2747 u32 mask, set; 2749 u32 mask, set;
2748 2750
2749 b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0); 2751 b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
@@ -2802,7 +2804,9 @@ static int b43_gpio_init(struct b43_wldev *dev)
2802/* Turn off all GPIO stuff. Call this on module unload, for example. */ 2804/* Turn off all GPIO stuff. Call this on module unload, for example. */
2803static void b43_gpio_cleanup(struct b43_wldev *dev) 2805static void b43_gpio_cleanup(struct b43_wldev *dev)
2804{ 2806{
2807#ifdef CONFIG_B43_SSB
2805 struct ssb_device *gpiodev; 2808 struct ssb_device *gpiodev;
2809#endif
2806 2810
2807 switch (dev->dev->bus_type) { 2811 switch (dev->dev->bus_type) {
2808#ifdef CONFIG_B43_BCMA 2812#ifdef CONFIG_B43_BCMA
@@ -3086,6 +3090,10 @@ static int b43_chip_init(struct b43_wldev *dev)
3086 if (err) 3090 if (err)
3087 goto err_gpio_clean; 3091 goto err_gpio_clean;
3088 3092
3093 err = b43_upload_initvals_band(dev);
3094 if (err)
3095 goto err_gpio_clean;
3096
3089 /* Turn the Analog on and initialize the PHY. */ 3097 /* Turn the Analog on and initialize the PHY. */
3090 phy->ops->switch_analog(dev, 1); 3098 phy->ops->switch_analog(dev, 1);
3091 err = b43_phy_init(dev); 3099 err = b43_phy_init(dev);
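The initvals upload is now split: b43_upload_initvals() handles only the common initvals, and the new b43_upload_initvals_band() uploads the band-specific ones (returning 0 when none are present). The resulting call order in b43_chip_init(), condensed from the hunk above:

	err = b43_upload_initvals(dev);		/* common initvals */
	if (err)
		goto err_gpio_clean;
	err = b43_upload_initvals_band(dev);	/* band-specific, may be a no-op */
	if (err)
		goto err_gpio_clean;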
@@ -3685,37 +3693,6 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
3685 mutex_unlock(&wl->mutex); 3693 mutex_unlock(&wl->mutex);
3686} 3694}
3687 3695
3688static void b43_put_phy_into_reset(struct b43_wldev *dev)
3689{
3690 u32 tmp;
3691
3692 switch (dev->dev->bus_type) {
3693#ifdef CONFIG_B43_BCMA
3694 case B43_BUS_BCMA:
3695 b43err(dev->wl,
3696 "Putting PHY into reset not supported on BCMA\n");
3697 break;
3698#endif
3699#ifdef CONFIG_B43_SSB
3700 case B43_BUS_SSB:
3701 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
3702 tmp &= ~B43_TMSLOW_GMODE;
3703 tmp |= B43_TMSLOW_PHYRESET;
3704 tmp |= SSB_TMSLOW_FGC;
3705 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
3706 msleep(1);
3707
3708 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
3709 tmp &= ~SSB_TMSLOW_FGC;
3710 tmp |= B43_TMSLOW_PHYRESET;
3711 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
3712 msleep(1);
3713
3714 break;
3715#endif
3716 }
3717}
3718
3719static const char *band_to_string(enum ieee80211_band band) 3696static const char *band_to_string(enum ieee80211_band band)
3720{ 3697{
3721 switch (band) { 3698 switch (band) {
@@ -3731,94 +3708,75 @@ static const char *band_to_string(enum ieee80211_band band)
3731} 3708}
3732 3709
3733/* Expects wl->mutex locked */ 3710/* Expects wl->mutex locked */
3734static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) 3711static int b43_switch_band(struct b43_wldev *dev,
3712 struct ieee80211_channel *chan)
3735{ 3713{
3736 struct b43_wldev *up_dev = NULL; 3714 struct b43_phy *phy = &dev->phy;
3737 struct b43_wldev *down_dev; 3715 bool gmode;
3738 struct b43_wldev *d; 3716 u32 tmp;
3739 int err;
3740 bool uninitialized_var(gmode);
3741 int prev_status;
3742 3717
3743 /* Find a device and PHY which supports the band. */ 3718 switch (chan->band) {
3744 list_for_each_entry(d, &wl->devlist, list) { 3719 case IEEE80211_BAND_5GHZ:
3745 switch (chan->band) { 3720 gmode = false;
3746 case IEEE80211_BAND_5GHZ: 3721 break;
3747 if (d->phy.supports_5ghz) { 3722 case IEEE80211_BAND_2GHZ:
3748 up_dev = d; 3723 gmode = true;
3749 gmode = false; 3724 break;
3750 } 3725 default:
3751 break; 3726 B43_WARN_ON(1);
3752 case IEEE80211_BAND_2GHZ: 3727 return -EINVAL;
3753 if (d->phy.supports_2ghz) {
3754 up_dev = d;
3755 gmode = true;
3756 }
3757 break;
3758 default:
3759 B43_WARN_ON(1);
3760 return -EINVAL;
3761 }
3762 if (up_dev)
3763 break;
3764 } 3728 }
3765 if (!up_dev) { 3729
3766 b43err(wl, "Could not find a device for %s-GHz band operation\n", 3730 if (!((gmode && phy->supports_2ghz) ||
3731 (!gmode && phy->supports_5ghz))) {
3732 b43err(dev->wl, "This device doesn't support %s-GHz band\n",
3767 band_to_string(chan->band)); 3733 band_to_string(chan->band));
3768 return -ENODEV; 3734 return -ENODEV;
3769 } 3735 }
3770 if ((up_dev == wl->current_dev) && 3736
3771 (!!wl->current_dev->phy.gmode == !!gmode)) { 3737 if (!!phy->gmode == !!gmode) {
3772 /* This device is already running. */ 3738 /* This device is already running. */
3773 return 0; 3739 return 0;
3774 } 3740 }
3775 b43dbg(wl, "Switching to %s-GHz band\n", 3741
3742 b43dbg(dev->wl, "Switching to %s GHz band\n",
3776 band_to_string(chan->band)); 3743 band_to_string(chan->band));
3777 down_dev = wl->current_dev;
3778 3744
3779 prev_status = b43_status(down_dev); 3745 /* Some newer devices don't need the radio disabled for band switching */
3780 /* Shutdown the currently running core. */ 3746 if (!(phy->type == B43_PHYTYPE_N && phy->rev >= 3))
3781 if (prev_status >= B43_STAT_STARTED) 3747 b43_software_rfkill(dev, true);
3782 down_dev = b43_wireless_core_stop(down_dev);
3783 if (prev_status >= B43_STAT_INITIALIZED)
3784 b43_wireless_core_exit(down_dev);
3785 3748
3786 if (down_dev != up_dev) { 3749 phy->gmode = gmode;
3787 /* We switch to a different core, so we put PHY into 3750 b43_phy_put_into_reset(dev);
3788 * RESET on the old core. */ 3751 switch (dev->dev->bus_type) {
3789 b43_put_phy_into_reset(down_dev); 3752#ifdef CONFIG_B43_BCMA
3753 case B43_BUS_BCMA:
3754 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
3755 if (gmode)
3756 tmp |= B43_BCMA_IOCTL_GMODE;
3757 else
3758 tmp &= ~B43_BCMA_IOCTL_GMODE;
3759 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
3760 break;
3761#endif
3762#ifdef CONFIG_B43_SSB
3763 case B43_BUS_SSB:
3764 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
3765 if (gmode)
3766 tmp |= B43_TMSLOW_GMODE;
3767 else
3768 tmp &= ~B43_TMSLOW_GMODE;
3769 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
3770 break;
3771#endif
3790 } 3772 }
3773 b43_phy_take_out_of_reset(dev);
3791 3774
3792 /* Now start the new core. */ 3775 b43_upload_initvals_band(dev);
3793 up_dev->phy.gmode = gmode;
3794 if (prev_status >= B43_STAT_INITIALIZED) {
3795 err = b43_wireless_core_init(up_dev);
3796 if (err) {
3797 b43err(wl, "Fatal: Could not initialize device for "
3798 "selected %s-GHz band\n",
3799 band_to_string(chan->band));
3800 goto init_failure;
3801 }
3802 }
3803 if (prev_status >= B43_STAT_STARTED) {
3804 err = b43_wireless_core_start(up_dev);
3805 if (err) {
3806 b43err(wl, "Fatal: Could not start device for "
3807 "selected %s-GHz band\n",
3808 band_to_string(chan->band));
3809 b43_wireless_core_exit(up_dev);
3810 goto init_failure;
3811 }
3812 }
3813 B43_WARN_ON(b43_status(up_dev) != prev_status);
3814 3776
3815 wl->current_dev = up_dev; 3777 b43_phy_init(dev);
3816 3778
3817 return 0; 3779 return 0;
3818init_failure:
3819 /* Whoops, failed to init the new core. No core is operating now. */
3820 wl->current_dev = NULL;
3821 return err;
3822} 3780}
3823 3781
3824/* Write the short and long frame retry limit values. */ 3782/* Write the short and long frame retry limit values. */
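Since the driver no longer juggles multiple cores, b43_switch_band() reprograms the single core in place instead of stopping one wldev and starting another. The sequence, condensed from the hunk above:

	/* Older PHYs still need the radio blocked across the switch. */
	if (!(phy->type == B43_PHYTYPE_N && phy->rev >= 3))
		b43_software_rfkill(dev, true);
	phy->gmode = gmode;
	b43_phy_put_into_reset(dev);
	/* ...flip the bus GMODE bit (BCMA_IOCTL or SSB TMSLOW)... */
	b43_phy_take_out_of_reset(dev);
	b43_upload_initvals_band(dev);
	b43_phy_init(dev);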
@@ -3851,8 +3809,10 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3851 3809
3852 dev = wl->current_dev; 3810 dev = wl->current_dev;
3853 3811
3812 b43_mac_suspend(dev);
3813
3854 /* Switch the band (if necessary). This might change the active core. */ 3814 /* Switch the band (if necessary). This might change the active core. */
3855 err = b43_switch_band(wl, conf->chandef.chan); 3815 err = b43_switch_band(dev, conf->chandef.chan);
3856 if (err) 3816 if (err)
3857 goto out_unlock_mutex; 3817 goto out_unlock_mutex;
3858 3818
@@ -3871,8 +3831,6 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3871 else 3831 else
3872 phy->is_40mhz = false; 3832 phy->is_40mhz = false;
3873 3833
3874 b43_mac_suspend(dev);
3875
3876 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 3834 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
3877 b43_set_retry_limits(dev, conf->short_frame_max_tx_count, 3835 b43_set_retry_limits(dev, conf->short_frame_max_tx_count,
3878 conf->long_frame_max_tx_count); 3836 conf->long_frame_max_tx_count);
@@ -4582,8 +4540,12 @@ static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
4582 struct ssb_bus *bus; 4540 struct ssb_bus *bus;
4583 u32 tmp; 4541 u32 tmp;
4584 4542
4543#ifdef CONFIG_B43_SSB
4585 if (dev->dev->bus_type != B43_BUS_SSB) 4544 if (dev->dev->bus_type != B43_BUS_SSB)
4586 return; 4545 return;
4546#else
4547 return;
4548#endif
4587 4549
4588 bus = dev->dev->sdev->bus; 4550 bus = dev->dev->sdev->bus;
4589 4551
@@ -4738,7 +4700,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4738 } 4700 }
4739 if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW) 4701 if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)
4740 hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */ 4702 hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */
4741#ifdef CONFIG_SSB_DRIVER_PCICORE 4703#if defined(CONFIG_B43_SSB) && defined(CONFIG_SSB_DRIVER_PCICORE)
4742 if (dev->dev->bus_type == B43_BUS_SSB && 4704 if (dev->dev->bus_type == B43_BUS_SSB &&
4743 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI && 4705 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
4744 dev->dev->sdev->bus->pcicore.dev->id.revision <= 10) 4706 dev->dev->sdev->bus->pcicore.dev->id.revision <= 10)
@@ -5129,10 +5091,82 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
5129 b43_phy_free(dev); 5091 b43_phy_free(dev);
5130} 5092}
5131 5093
5094static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
5095 bool *have_5ghz_phy)
5096{
5097 u16 dev_id = 0;
5098
5099#ifdef CONFIG_B43_BCMA
5100 if (dev->dev->bus_type == B43_BUS_BCMA &&
5101 dev->dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI)
5102 dev_id = dev->dev->bdev->bus->host_pci->device;
5103#endif
5104#ifdef CONFIG_B43_SSB
5105 if (dev->dev->bus_type == B43_BUS_SSB &&
5106 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
5107 dev_id = dev->dev->sdev->bus->host_pci->device;
5108#endif
5109 /* Override with SPROM value if available */
5110 if (dev->dev->bus_sprom->dev_id)
5111 dev_id = dev->dev->bus_sprom->dev_id;
5112
5113 /* Note: the IDs below can be "virtual" (not matching e.g. the real PCI ID) */
5114 switch (dev_id) {
5115 case 0x4324: /* BCM4306 */
5116 case 0x4312: /* BCM4311 */
5117 case 0x4319: /* BCM4318 */
5118 case 0x4328: /* BCM4321 */
5119 case 0x432b: /* BCM4322 */
5120 case 0x4350: /* BCM43222 */
5121 case 0x4353: /* BCM43224 */
5122 case 0x0576: /* BCM43224 */
5123 case 0x435f: /* BCM6362 */
5124 case 0x4331: /* BCM4331 */
5125 case 0x4359: /* BCM43228 */
5126 case 0x43a0: /* BCM4360 */
5127 case 0x43b1: /* BCM4352 */
5128 /* Dual band devices */
5129 *have_2ghz_phy = true;
5130 *have_5ghz_phy = true;
5131 return;
5132 case 0x4321: /* BCM4306 */
5133 case 0x4313: /* BCM4311 */
5134 case 0x431a: /* BCM4318 */
5135 case 0x432a: /* BCM4321 */
5136 case 0x432d: /* BCM4322 */
5137 case 0x4352: /* BCM43222 */
5138 case 0x4333: /* BCM4331 */
5139 case 0x43a2: /* BCM4360 */
5140 case 0x43b3: /* BCM4352 */
5141 /* 5 GHz only devices */
5142 *have_2ghz_phy = false;
5143 *have_5ghz_phy = true;
5144 return;
5145 }
5146
5147 /* As a fallback, try to guess using PHY type */
5148 switch (dev->phy.type) {
5149 case B43_PHYTYPE_A:
5150 *have_2ghz_phy = false;
5151 *have_5ghz_phy = true;
5152 return;
5153 case B43_PHYTYPE_G:
5154 case B43_PHYTYPE_N:
5155 case B43_PHYTYPE_LP:
5156 case B43_PHYTYPE_HT:
5157 case B43_PHYTYPE_LCN:
5158 *have_2ghz_phy = true;
5159 *have_5ghz_phy = false;
5160 return;
5161 }
5162
5163 B43_WARN_ON(1);
5164}
5165
5132static int b43_wireless_core_attach(struct b43_wldev *dev) 5166static int b43_wireless_core_attach(struct b43_wldev *dev)
5133{ 5167{
5134 struct b43_wl *wl = dev->wl; 5168 struct b43_wl *wl = dev->wl;
5135 struct pci_dev *pdev = NULL; 5169 struct b43_phy *phy = &dev->phy;
5136 int err; 5170 int err;
5137 u32 tmp; 5171 u32 tmp;
5138 bool have_2ghz_phy = false, have_5ghz_phy = false; 5172 bool have_2ghz_phy = false, have_5ghz_phy = false;
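b43_supported_bands() replaces the old PCI-ID special-casing in the attach path. Note that the SPROM dev_id, when set, overrides the host PCI device ID, which is how the "virtual" IDs are handled. A condensed sketch of how attach consumes it (the full path also runs b43_phy_versioning() first):

	bool have_2ghz_phy = false, have_5ghz_phy = false;

	b43_supported_bands(dev, &have_2ghz_phy, &have_5ghz_phy);
	if (!have_2ghz_phy && !have_5ghz_phy)
		return -EOPNOTSUPP;	/* no band b43 can run on this device */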
@@ -5144,19 +5178,15 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
5144 * that in core_init(), too. 5178 * that in core_init(), too.
5145 */ 5179 */
5146 5180
5147#ifdef CONFIG_B43_SSB
5148 if (dev->dev->bus_type == B43_BUS_SSB &&
5149 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
5150 pdev = dev->dev->sdev->bus->host_pci;
5151#endif
5152
5153 err = b43_bus_powerup(dev, 0); 5181 err = b43_bus_powerup(dev, 0);
5154 if (err) { 5182 if (err) {
5155 b43err(wl, "Bus powerup failed\n"); 5183 b43err(wl, "Bus powerup failed\n");
5156 goto out; 5184 goto out;
5157 } 5185 }
5158 5186
5159 /* Get the PHY type. */ 5187 phy->do_full_init = true;
5188
5189 /* Try to guess supported bands for the first init needs */
5160 switch (dev->dev->bus_type) { 5190 switch (dev->dev->bus_type) {
5161#ifdef CONFIG_B43_BCMA 5191#ifdef CONFIG_B43_BCMA
5162 case B43_BUS_BCMA: 5192 case B43_BUS_BCMA:
@@ -5178,51 +5208,31 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
5178 } 5208 }
5179 5209
5180 dev->phy.gmode = have_2ghz_phy; 5210 dev->phy.gmode = have_2ghz_phy;
5181 dev->phy.radio_on = true;
5182 b43_wireless_core_reset(dev, dev->phy.gmode); 5211 b43_wireless_core_reset(dev, dev->phy.gmode);
5183 5212
5213 /* Get the PHY type. */
5184 err = b43_phy_versioning(dev); 5214 err = b43_phy_versioning(dev);
5185 if (err) 5215 if (err)
5186 goto err_powerdown; 5216 goto err_powerdown;
5187 /* Check if this device supports multiband. */ 5217
5188 if (!pdev || 5218 /* Get real info about supported bands */
5189 (pdev->device != 0x4312 && 5219 b43_supported_bands(dev, &have_2ghz_phy, &have_5ghz_phy);
5190 pdev->device != 0x4319 && pdev->device != 0x4324)) { 5220
5191 /* No multiband support. */ 5221 /* We don't support 5 GHz on some PHYs yet */
5192 have_2ghz_phy = false; 5222 switch (dev->phy.type) {
5223 case B43_PHYTYPE_A:
5224 case B43_PHYTYPE_N:
5225 case B43_PHYTYPE_LP:
5226 case B43_PHYTYPE_HT:
5227 b43warn(wl, "5 GHz band is unsupported on this PHY\n");
5193 have_5ghz_phy = false; 5228 have_5ghz_phy = false;
5194 switch (dev->phy.type) {
5195 case B43_PHYTYPE_A:
5196 have_5ghz_phy = true;
5197 break;
5198 case B43_PHYTYPE_LP: //FIXME not always!
5199#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
5200 have_5ghz_phy = 1;
5201#endif
5202 case B43_PHYTYPE_G:
5203 case B43_PHYTYPE_N:
5204 case B43_PHYTYPE_HT:
5205 case B43_PHYTYPE_LCN:
5206 have_2ghz_phy = true;
5207 break;
5208 default:
5209 B43_WARN_ON(1);
5210 }
5211 } 5229 }
5212 if (dev->phy.type == B43_PHYTYPE_A) { 5230
5213 /* FIXME */ 5231 if (!have_2ghz_phy && !have_5ghz_phy) {
5214 b43err(wl, "IEEE 802.11a devices are unsupported\n"); 5232 b43err(wl, "b43 can't support any band on this device\n");
5215 err = -EOPNOTSUPP; 5233 err = -EOPNOTSUPP;
5216 goto err_powerdown; 5234 goto err_powerdown;
5217 } 5235 }
5218 if (1 /* disable A-PHY */) {
5219 /* FIXME: For now we disable the A-PHY on multi-PHY devices. */
5220 if (dev->phy.type != B43_PHYTYPE_N &&
5221 dev->phy.type != B43_PHYTYPE_LP) {
5222 have_2ghz_phy = true;
5223 have_5ghz_phy = false;
5224 }
5225 }
5226 5236
5227 err = b43_phy_allocate(dev); 5237 err = b43_phy_allocate(dev);
5228 if (err) 5238 if (err)
@@ -5270,7 +5280,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
5270 b43_debugfs_remove_device(wldev); 5280 b43_debugfs_remove_device(wldev);
5271 b43_wireless_core_detach(wldev); 5281 b43_wireless_core_detach(wldev);
5272 list_del(&wldev->list); 5282 list_del(&wldev->list);
5273 wl->nr_devs--;
5274 b43_bus_set_wldev(dev, NULL); 5283 b43_bus_set_wldev(dev, NULL);
5275 kfree(wldev); 5284 kfree(wldev);
5276} 5285}
@@ -5295,8 +5304,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
5295 if (err) 5304 if (err)
5296 goto err_kfree_wldev; 5305 goto err_kfree_wldev;
5297 5306
5298 list_add(&wldev->list, &wl->devlist);
5299 wl->nr_devs++;
5300 b43_bus_set_wldev(dev, wldev); 5307 b43_bus_set_wldev(dev, wldev);
5301 b43_debugfs_add_device(wldev); 5308 b43_debugfs_add_device(wldev);
5302 5309
@@ -5314,6 +5321,7 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
5314 (pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) && \ 5321 (pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) && \
5315 (pdev->subsystem_device == _subdevice) ) 5322 (pdev->subsystem_device == _subdevice) )
5316 5323
5324#ifdef CONFIG_B43_SSB
5317static void b43_sprom_fixup(struct ssb_bus *bus) 5325static void b43_sprom_fixup(struct ssb_bus *bus)
5318{ 5326{
5319 struct pci_dev *pdev; 5327 struct pci_dev *pdev;
@@ -5345,6 +5353,7 @@ static void b43_wireless_exit(struct b43_bus_dev *dev, struct b43_wl *wl)
5345 ssb_set_devtypedata(dev->sdev, NULL); 5353 ssb_set_devtypedata(dev->sdev, NULL);
5346 ieee80211_free_hw(hw); 5354 ieee80211_free_hw(hw);
5347} 5355}
5356#endif
5348 5357
5349static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) 5358static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5350{ 5359{
@@ -5386,7 +5395,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5386 wl->hw = hw; 5395 wl->hw = hw;
5387 mutex_init(&wl->mutex); 5396 mutex_init(&wl->mutex);
5388 spin_lock_init(&wl->hardirq_lock); 5397 spin_lock_init(&wl->hardirq_lock);
5389 INIT_LIST_HEAD(&wl->devlist);
5390 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); 5398 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
5391 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); 5399 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
5392 INIT_WORK(&wl->tx_work, b43_tx_work); 5400 INIT_WORK(&wl->tx_work, b43_tx_work);
@@ -5486,39 +5494,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
5486 struct b43_bus_dev *dev; 5494 struct b43_bus_dev *dev;
5487 struct b43_wl *wl; 5495 struct b43_wl *wl;
5488 int err; 5496 int err;
5489 int first = 0;
5490 5497
5491 dev = b43_bus_dev_ssb_init(sdev); 5498 dev = b43_bus_dev_ssb_init(sdev);
5492 if (!dev) 5499 if (!dev)
5493 return -ENOMEM; 5500 return -ENOMEM;
5494 5501
5495 wl = ssb_get_devtypedata(sdev); 5502 wl = ssb_get_devtypedata(sdev);
5496 if (!wl) { 5503 if (wl) {
5497 /* Probing the first core. Must setup common struct b43_wl */ 5504 b43err(NULL, "Dual-core devices are not supported\n");
5498 first = 1; 5505 err = -ENOTSUPP;
5499 b43_sprom_fixup(sdev->bus); 5506 goto err_ssb_kfree_dev;
5500 wl = b43_wireless_init(dev); 5507 }
5501 if (IS_ERR(wl)) { 5508
5502 err = PTR_ERR(wl); 5509 b43_sprom_fixup(sdev->bus);
5503 goto out; 5510
5504 } 5511 wl = b43_wireless_init(dev);
5505 ssb_set_devtypedata(sdev, wl); 5512 if (IS_ERR(wl)) {
5506 B43_WARN_ON(ssb_get_devtypedata(sdev) != wl); 5513 err = PTR_ERR(wl);
5514 goto err_ssb_kfree_dev;
5507 } 5515 }
5516 ssb_set_devtypedata(sdev, wl);
5517 B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
5518
5508 err = b43_one_core_attach(dev, wl); 5519 err = b43_one_core_attach(dev, wl);
5509 if (err) 5520 if (err)
5510 goto err_wireless_exit; 5521 goto err_ssb_wireless_exit;
5511 5522
5512 /* setup and start work to load firmware */ 5523 /* setup and start work to load firmware */
5513 INIT_WORK(&wl->firmware_load, b43_request_firmware); 5524 INIT_WORK(&wl->firmware_load, b43_request_firmware);
5514 schedule_work(&wl->firmware_load); 5525 schedule_work(&wl->firmware_load);
5515 5526
5516 out:
5517 return err; 5527 return err;
5518 5528
5519 err_wireless_exit: 5529err_ssb_wireless_exit:
5520 if (first) 5530 b43_wireless_exit(dev, wl);
5521 b43_wireless_exit(dev, wl); 5531err_ssb_kfree_dev:
5532 kfree(dev);
5522 return err; 5533 return err;
5523} 5534}
5524 5535
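The reworked probe path also fixes the error unwinding: dual-core devices are rejected up front with -ENOTSUPP, and the labels now release everything that was allocated (the old path returned without freeing the b43_bus_dev, which appears to have been a leak). Condensed from the hunk above:

	err_ssb_wireless_exit:
		b43_wireless_exit(dev, wl);
	err_ssb_kfree_dev:
		kfree(dev);
		return err;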
@@ -5546,13 +5557,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5546 /* Unregister HW RNG driver */ 5557 /* Unregister HW RNG driver */
5547 b43_rng_exit(wl); 5558 b43_rng_exit(wl);
5548 5559
5549 if (list_empty(&wl->devlist)) { 5560 b43_leds_unregister(wl);
5550 b43_leds_unregister(wl); 5561 b43_wireless_exit(dev, wl);
5551 /* Last core on the chip unregistered.
5552 * We can destroy common struct b43_wl.
5553 */
5554 b43_wireless_exit(dev, wl);
5555 }
5556} 5562}
5557 5563
5558static struct ssb_driver b43_ssb_driver = { 5564static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index dbaa51890198..08244b3b327e 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -96,12 +96,16 @@ int b43_phy_init(struct b43_wldev *dev)
96 96
97 phy->channel = ops->get_default_chan(dev); 97 phy->channel = ops->get_default_chan(dev);
98 98
99 ops->software_rfkill(dev, false); 99 phy->ops->switch_analog(dev, true);
100 b43_software_rfkill(dev, false);
101
100 err = ops->init(dev); 102 err = ops->init(dev);
101 if (err) { 103 if (err) {
102 b43err(dev->wl, "PHY init failed\n"); 104 b43err(dev->wl, "PHY init failed\n");
103 goto err_block_rf; 105 goto err_block_rf;
104 } 106 }
107 phy->do_full_init = false;
108
105 /* Make sure to switch hardware and firmware (SHM) to 109 /* Make sure to switch hardware and firmware (SHM) to
106 * the default channel. */ 110 * the default channel. */
107 err = b43_switch_channel(dev, ops->get_default_chan(dev)); 111 err = b43_switch_channel(dev, ops->get_default_chan(dev));
@@ -113,10 +117,11 @@ int b43_phy_init(struct b43_wldev *dev)
113 return 0; 117 return 0;
114 118
115err_phy_exit: 119err_phy_exit:
120 phy->do_full_init = true;
116 if (ops->exit) 121 if (ops->exit)
117 ops->exit(dev); 122 ops->exit(dev);
118err_block_rf: 123err_block_rf:
119 ops->software_rfkill(dev, true); 124 b43_software_rfkill(dev, true);
120 125
121 return err; 126 return err;
122} 127}
@@ -125,7 +130,8 @@ void b43_phy_exit(struct b43_wldev *dev)
125{ 130{
126 const struct b43_phy_operations *ops = dev->phy.ops; 131 const struct b43_phy_operations *ops = dev->phy.ops;
127 132
128 ops->software_rfkill(dev, true); 133 b43_software_rfkill(dev, true);
134 dev->phy.do_full_init = true;
129 if (ops->exit) 135 if (ops->exit)
130 ops->exit(dev); 136 ops->exit(dev);
131} 137}
@@ -312,6 +318,90 @@ void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
312 } 318 }
313} 319}
314 320
321void b43_phy_put_into_reset(struct b43_wldev *dev)
322{
323 u32 tmp;
324
325 switch (dev->dev->bus_type) {
326#ifdef CONFIG_B43_BCMA
327 case B43_BUS_BCMA:
328 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
329 tmp &= ~B43_BCMA_IOCTL_GMODE;
330 tmp |= B43_BCMA_IOCTL_PHY_RESET;
331 tmp |= BCMA_IOCTL_FGC;
332 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
333 udelay(1);
334
335 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
336 tmp &= ~BCMA_IOCTL_FGC;
337 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
338 udelay(1);
339 break;
340#endif
341#ifdef CONFIG_B43_SSB
342 case B43_BUS_SSB:
343 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
344 tmp &= ~B43_TMSLOW_GMODE;
345 tmp |= B43_TMSLOW_PHYRESET;
346 tmp |= SSB_TMSLOW_FGC;
347 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
348 usleep_range(1000, 2000);
349
350 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
351 tmp &= ~SSB_TMSLOW_FGC;
352 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
353 usleep_range(1000, 2000);
354
355 break;
356#endif
357 }
358}
359
360void b43_phy_take_out_of_reset(struct b43_wldev *dev)
361{
362 u32 tmp;
363
364 switch (dev->dev->bus_type) {
365#ifdef CONFIG_B43_BCMA
366 case B43_BUS_BCMA:
367 /* Unset reset bit (with forcing clock) */
368 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
369 tmp &= ~B43_BCMA_IOCTL_PHY_RESET;
370 tmp &= ~B43_BCMA_IOCTL_PHY_CLKEN;
371 tmp |= BCMA_IOCTL_FGC;
372 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
373 udelay(1);
374
375 /* Do not force clock anymore */
376 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
377 tmp &= ~BCMA_IOCTL_FGC;
378 tmp |= B43_BCMA_IOCTL_PHY_CLKEN;
379 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
380 udelay(1);
381 break;
382#endif
383#ifdef CONFIG_B43_SSB
384 case B43_BUS_SSB:
385 /* Unset reset bit (with forcing clock) */
386 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
387 tmp &= ~B43_TMSLOW_PHYRESET;
388 tmp &= ~B43_TMSLOW_PHYCLKEN;
389 tmp |= SSB_TMSLOW_FGC;
390 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
391 ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
392 usleep_range(1000, 2000);
393
394 tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
395 tmp &= ~SSB_TMSLOW_FGC;
396 tmp |= B43_TMSLOW_PHYCLKEN;
397 ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
398 ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
399 usleep_range(1000, 2000);
400 break;
401#endif
402 }
403}
404
315int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel) 405int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
316{ 406{
317 struct b43_phy *phy = &(dev->phy); 407 struct b43_phy *phy = &(dev->phy);
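These two new helpers centralize the reset sequences that were previously open-coded per bus in main.c; both follow the same pattern of toggling the PHY reset bit with the clock forced (FGC) and then releasing the forced clock. Minimal usage sketch, matching the band-switch path:

	b43_phy_put_into_reset(dev);
	/* ...reconfigure band/GMODE while the PHY is held in reset... */
	b43_phy_take_out_of_reset(dev);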
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index f1b999349876..4ad6240d9ff4 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -231,9 +231,12 @@ struct b43_phy {
231 /* HT info */ 231 /* HT info */
232 bool is_40mhz; 232 bool is_40mhz;
233 233
234 /* GMODE bit enabled? */ 234 /* Is GMODE (2 GHz mode) bit enabled? */
235 bool gmode; 235 bool gmode;
236 236
237 /* After a power reset, a full init has to be performed */
238 bool do_full_init;
239
237 /* Analog Type */ 240 /* Analog Type */
238 u8 analog; 241 u8 analog;
239 /* B43_PHYTYPE_ */ 242 /* B43_PHYTYPE_ */
@@ -390,6 +393,9 @@ void b43_phy_lock(struct b43_wldev *dev);
390 */ 393 */
391void b43_phy_unlock(struct b43_wldev *dev); 394void b43_phy_unlock(struct b43_wldev *dev);
392 395
396void b43_phy_put_into_reset(struct b43_wldev *dev);
397void b43_phy_take_out_of_reset(struct b43_wldev *dev);
398
393/** 399/**
394 * b43_switch_channel - Switch to another channel 400 * b43_switch_channel - Switch to another channel
395 */ 401 */
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 12f467b8d564..8f5c14bc10e6 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
1587 b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004); 1587 b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
1588} 1588}
1589 1589
1590/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
1590static void b43_phy_initb6(struct b43_wldev *dev) 1591static void b43_phy_initb6(struct b43_wldev *dev)
1591{ 1592{
1592 struct b43_phy *phy = &dev->phy; 1593 struct b43_phy *phy = &dev->phy;
@@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
1670 b43_radio_write16(dev, 0x50, 0x20); 1671 b43_radio_write16(dev, 0x50, 0x20);
1671 } 1672 }
1672 if (phy->radio_rev <= 2) { 1673 if (phy->radio_rev <= 2) {
1673 b43_radio_write16(dev, 0x7C, 0x20); 1674 b43_radio_write16(dev, 0x50, 0x20);
1674 b43_radio_write16(dev, 0x5A, 0x70); 1675 b43_radio_write16(dev, 0x5A, 0x70);
1675 b43_radio_write16(dev, 0x5B, 0x7B); 1676 b43_radio_write16(dev, 0x5B, 0x7B);
1676 b43_radio_write16(dev, 0x5C, 0xB0); 1677 b43_radio_write16(dev, 0x5C, 0xB0);
@@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
1686 b43_phy_write(dev, 0x2A, 0x8AC0); 1687 b43_phy_write(dev, 0x2A, 0x8AC0);
1687 b43_phy_write(dev, 0x0038, 0x0668); 1688 b43_phy_write(dev, 0x0038, 0x0668);
1688 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control); 1689 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
1689 if (phy->radio_rev <= 5) { 1690 if (phy->radio_rev == 4 || phy->radio_rev == 5)
1690 b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003); 1691 b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
1691 }
1692 if (phy->radio_rev <= 2) 1692 if (phy->radio_rev <= 2)
1693 b43_radio_write16(dev, 0x005D, 0x000D); 1693 b43_radio_write16(dev, 0x005D, 0x000D);
1694 1694
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 24ccbe96e0c8..86569f6a8705 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
257 } 257 }
258} 258}
259 259
260static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
261 enum n_intc_override intc_override,
262 u16 value, u8 core_sel)
263{
264 u16 reg, tmp, tmp2, val;
265 int core;
266
267 for (core = 0; core < 2; core++) {
268 if ((core_sel == 1 && core != 0) ||
269 (core_sel == 2 && core != 1))
270 continue;
271
272 reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
273
274 switch (intc_override) {
275 case N_INTC_OVERRIDE_OFF:
276 b43_phy_write(dev, reg, 0);
277 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
278 break;
279 case N_INTC_OVERRIDE_TRSW:
280 b43_phy_maskset(dev, reg, ~0xC0, value << 6);
281 b43_phy_set(dev, reg, 0x400);
282
283 b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
284 b43_phy_set(dev, 0x2ff, 0x2000);
285 b43_phy_set(dev, 0x2ff, 0x0001);
286 break;
287 case N_INTC_OVERRIDE_PA:
288 tmp = 0x0030;
289 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
290 val = value << 5;
291 else
292 val = value << 4;
293 b43_phy_maskset(dev, reg, ~tmp, val);
294 b43_phy_set(dev, reg, 0x1000);
295 break;
296 case N_INTC_OVERRIDE_EXT_LNA_PU:
297 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
298 tmp = 0x0001;
299 tmp2 = 0x0004;
300 val = value;
301 } else {
302 tmp = 0x0004;
303 tmp2 = 0x0001;
304 val = value << 2;
305 }
306 b43_phy_maskset(dev, reg, ~tmp, val);
307 b43_phy_mask(dev, reg, ~tmp2);
308 break;
309 case N_INTC_OVERRIDE_EXT_LNA_GAIN:
310 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
311 tmp = 0x0002;
312 tmp2 = 0x0008;
313 val = value << 1;
314 } else {
315 tmp = 0x0008;
316 tmp2 = 0x0002;
317 val = value << 3;
318 }
319 b43_phy_maskset(dev, reg, ~tmp, val);
320 b43_phy_mask(dev, reg, ~tmp2);
321 break;
322 }
323 }
324}
325
260/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ 326/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
261static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev, 327static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
262 enum n_intc_override intc_override, 328 enum n_intc_override intc_override,
@@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
265 u8 i, j; 331 u8 i, j;
266 u16 reg, tmp, val; 332 u16 reg, tmp, val;
267 333
334 if (dev->phy.rev >= 7) {
335 b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
336 core);
337 return;
338 }
339
268 B43_WARN_ON(dev->phy.rev < 3); 340 B43_WARN_ON(dev->phy.rev < 3);
269 341
270 for (i = 0; i < 2; i++) { 342 for (i = 0; i < 2; i++) {
@@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
419 static const u16 clip[] = { 0xFFFF, 0xFFFF }; 491 static const u16 clip[] = { 0xFFFF, 0xFFFF };
420 if (nphy->deaf_count++ == 0) { 492 if (nphy->deaf_count++ == 0) {
421 nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); 493 nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
422 b43_nphy_classifier(dev, 0x7, 0); 494 b43_nphy_classifier(dev, 0x7,
495 B43_NPHY_CLASSCTL_WAITEDEN);
423 b43_nphy_read_clip_detection(dev, nphy->clip_state); 496 b43_nphy_read_clip_detection(dev, nphy->clip_state);
424 b43_nphy_write_clip_detection(dev, clip); 497 b43_nphy_write_clip_detection(dev, clip);
425 } 498 }
@@ -627,13 +700,11 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
627 b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78); 700 b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
628 b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80); 701 b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
629 702
630 if (dev->phy.n->init_por) { 703 if (dev->phy.do_full_init) {
631 b43_radio_2057_rcal(dev); 704 b43_radio_2057_rcal(dev);
632 b43_radio_2057_rccal(dev); 705 b43_radio_2057_rccal(dev);
633 } 706 }
634 b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8); 707 b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
635
636 dev->phy.n->init_por = false;
637} 708}
638 709
639/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */ 710/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
@@ -734,9 +805,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
734 u16 bias, cbias; 805 u16 bias, cbias;
735 u16 pag_boost, padg_boost, pgag_boost, mixg_boost; 806 u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
736 u16 paa_boost, pada_boost, pgaa_boost, mixa_boost; 807 u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
808 bool is_pkg_fab_smic;
737 809
738 B43_WARN_ON(dev->phy.rev < 3); 810 B43_WARN_ON(dev->phy.rev < 3);
739 811
812 is_pkg_fab_smic =
813 ((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
814 dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
815 dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
816 dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
817
740 b43_chantab_radio_2056_upload(dev, e); 818 b43_chantab_radio_2056_upload(dev, e);
741 b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ); 819 b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
742 820
@@ -744,7 +822,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
744 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 822 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
745 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); 823 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
746 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); 824 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
747 if (dev->dev->chip_id == 0x4716) { 825 if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
826 dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
748 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14); 827 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
749 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0); 828 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
750 } else { 829 } else {
@@ -752,6 +831,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
752 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14); 831 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
753 } 832 }
754 } 833 }
834 if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
835 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
836 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
837 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
838 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
839 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
840 }
755 if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR && 841 if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
756 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 842 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
757 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); 843 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@@ -767,7 +853,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
767 b43_radio_write(dev, 853 b43_radio_write(dev,
768 offset | B2056_TX_PADG_IDAC, 0xcc); 854 offset | B2056_TX_PADG_IDAC, 0xcc);
769 855
770 if (dev->dev->chip_id == 0x4716) { 856 if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
857 dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
771 bias = 0x40; 858 bias = 0x40;
772 cbias = 0x45; 859 cbias = 0x45;
773 pag_boost = 0x5; 860 pag_boost = 0x5;
@@ -776,6 +863,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
776 } else { 863 } else {
777 bias = 0x25; 864 bias = 0x25;
778 cbias = 0x20; 865 cbias = 0x20;
866 if (is_pkg_fab_smic) {
867 bias = 0x2a;
868 cbias = 0x38;
869 }
779 pag_boost = 0x4; 870 pag_boost = 0x4;
780 pgag_boost = 0x03; 871 pgag_boost = 0x03;
781 mixg_boost = 0x65; 872 mixg_boost = 0x65;
@@ -844,6 +935,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
844 mixa_boost = 0xF; 935 mixa_boost = 0xF;
845 } 936 }
846 937
938 cbias = is_pkg_fab_smic ? 0x35 : 0x30;
939
847 for (i = 0; i < 2; i++) { 940 for (i = 0; i < 2; i++) {
848 offset = i ? B2056_TX1 : B2056_TX0; 941 offset = i ? B2056_TX1 : B2056_TX0;
849 942
@@ -862,11 +955,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
862 b43_radio_write(dev, 955 b43_radio_write(dev,
863 offset | B2056_TX_PADA_CASCBIAS, 0x03); 956 offset | B2056_TX_PADA_CASCBIAS, 0x03);
864 b43_radio_write(dev, 957 b43_radio_write(dev,
865 offset | B2056_TX_INTPAA_IAUX_STAT, 0x50); 958 offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
866 b43_radio_write(dev, 959 b43_radio_write(dev,
867 offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50); 960 offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
868 b43_radio_write(dev, 961 b43_radio_write(dev,
869 offset | B2056_TX_INTPAA_CASCBIAS, 0x30); 962 offset | B2056_TX_INTPAA_CASCBIAS, cbias);
870 } 963 }
871 } 964 }
872 965
@@ -933,7 +1026,7 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
933 b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); 1026 b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
934 b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); 1027 b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
935 b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); 1028 b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
936 if (dev->phy.n->init_por) 1029 if (dev->phy.do_full_init)
937 b43_radio_2056_rcal(dev); 1030 b43_radio_2056_rcal(dev);
938} 1031}
939 1032
@@ -946,8 +1039,6 @@ static void b43_radio_init2056(struct b43_wldev *dev)
946 b43_radio_init2056_pre(dev); 1039 b43_radio_init2056_pre(dev);
947 b2056_upload_inittabs(dev, 0, 0); 1040 b2056_upload_inittabs(dev, 0, 0);
948 b43_radio_init2056_post(dev); 1041 b43_radio_init2056_post(dev);
949
950 dev->phy.n->init_por = false;
951} 1042}
952 1043
953/************************************************** 1044/**************************************************
@@ -1164,23 +1255,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
1164 u16 seq_mode; 1255 u16 seq_mode;
1165 u32 tmp; 1256 u32 tmp;
1166 1257
1167 if (nphy->hang_avoid) 1258 b43_nphy_stay_in_carrier_search(dev, true);
1168 b43_nphy_stay_in_carrier_search(dev, true);
1169 1259
1170 if ((nphy->bb_mult_save & 0x80000000) == 0) { 1260 if ((nphy->bb_mult_save & 0x80000000) == 0) {
1171 tmp = b43_ntab_read(dev, B43_NTAB16(15, 87)); 1261 tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
1172 nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000; 1262 nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
1173 } 1263 }
1174 1264
1265 /* TODO: add modify_bbmult argument */
1175 if (!dev->phy.is_40mhz) 1266 if (!dev->phy.is_40mhz)
1176 tmp = 0x6464; 1267 tmp = 0x6464;
1177 else 1268 else
1178 tmp = 0x4747; 1269 tmp = 0x4747;
1179 b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); 1270 b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
1180 1271
1181 if (nphy->hang_avoid)
1182 b43_nphy_stay_in_carrier_search(dev, false);
1183
1184 b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1)); 1272 b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
1185 1273
1186 if (loops != 0xFFFF) 1274 if (loops != 0xFFFF)
@@ -1213,6 +1301,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
1213 b43err(dev->wl, "run samples timeout\n"); 1301 b43err(dev->wl, "run samples timeout\n");
1214 1302
1215 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); 1303 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
1304
1305 b43_nphy_stay_in_carrier_search(dev, false);
1216} 1306}
1217 1307
1218/************************************************** 1308/**************************************************
@@ -1588,8 +1678,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1588 struct b43_phy_n *nphy = dev->phy.n; 1678 struct b43_phy_n *nphy = dev->phy.n;
1589 1679
1590 u16 saved_regs_phy_rfctl[2]; 1680 u16 saved_regs_phy_rfctl[2];
1591 u16 saved_regs_phy[13]; 1681 u16 saved_regs_phy[22];
1592 u16 regs_to_store[] = { 1682 u16 regs_to_store_rev3[] = {
1593 B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER, 1683 B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
1594 B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2, 1684 B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
1595 B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER, 1685 B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@@ -1598,6 +1688,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1598 B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2, 1688 B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
1599 B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2 1689 B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
1600 }; 1690 };
1691 u16 regs_to_store_rev7[] = {
1692 B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
1693 B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
1694 B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
1695 0x342, 0x343, 0x346, 0x347,
1696 0x2ff,
1697 B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
1698 B43_NPHY_RFCTL_CMD,
1699 B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
1700 0x340, 0x341, 0x344, 0x345,
1701 B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
1702 };
1703 u16 *regs_to_store;
1704 int regs_amount;
1601 1705
1602 u16 class; 1706 u16 class;
1603 1707
@@ -1617,6 +1721,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1617 u8 rx_core_state; 1721 u8 rx_core_state;
1618 int core, i, j, vcm; 1722 int core, i, j, vcm;
1619 1723
1724 if (dev->phy.rev >= 7) {
1725 regs_to_store = regs_to_store_rev7;
1726 regs_amount = ARRAY_SIZE(regs_to_store_rev7);
1727 } else {
1728 regs_to_store = regs_to_store_rev3;
1729 regs_amount = ARRAY_SIZE(regs_to_store_rev3);
1730 }
1731 BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
1732
1620 class = b43_nphy_classifier(dev, 0, 0); 1733 class = b43_nphy_classifier(dev, 0, 0);
1621 b43_nphy_classifier(dev, 7, 4); 1734 b43_nphy_classifier(dev, 7, 4);
1622 b43_nphy_read_clip_detection(dev, clip_state); 1735 b43_nphy_read_clip_detection(dev, clip_state);
@@ -1624,22 +1737,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1624 1737
1625 saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); 1738 saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
1626 saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); 1739 saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
1627 for (i = 0; i < ARRAY_SIZE(regs_to_store); i++) 1740 for (i = 0; i < regs_amount; i++)
1628 saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]); 1741 saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
1629 1742
1630 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7); 1743 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
1631 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7); 1744 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
1632 b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false); 1745
1633 b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false); 1746 if (dev->phy.rev >= 7) {
1634 b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false); 1747 /* TODO */
1635 b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false); 1748 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1636 1749 } else {
1637 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 1750 }
1638 b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
1639 b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
1640 } else { 1751 } else {
1641 b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false); 1752 b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
1642 b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false); 1753 b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
1754 b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
1755 b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
1756 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1757 b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
1758 b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
1759 } else {
1760 b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
1761 b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
1762 }
1643 } 1763 }
1644 1764
1645 rx_core_state = b43_nphy_get_rx_core_state(dev); 1765 rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1654,8 +1774,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1654 1774
1655 /* Grab RSSI results for every possible VCM */ 1775 /* Grab RSSI results for every possible VCM */
1656 for (vcm = 0; vcm < 8; vcm++) { 1776 for (vcm = 0; vcm < 8; vcm++) {
1657 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, 1777 if (dev->phy.rev >= 7)
1658 vcm << 2); 1778 ;
1779 else
1780 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
1781 0xE3, vcm << 2);
1659 b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8); 1782 b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
1660 } 1783 }
1661 1784
@@ -1682,8 +1805,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1682 } 1805 }
1683 1806
1684 /* Select the best VCM */ 1807 /* Select the best VCM */
1685 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, 1808 if (dev->phy.rev >= 7)
1686 vcm_final << 2); 1809 ;
1810 else
1811 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
1812 0xE3, vcm_final << 2);
1687 1813
1688 for (i = 0; i < 4; i++) { 1814 for (i = 0; i < 4; i++) {
1689 if (core != i / 2) 1815 if (core != i / 2)
@@ -1736,9 +1862,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1736 1862
1737 b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); 1863 b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
1738 b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX); 1864 b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
1739 b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1); 1865 b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
1740 1866
1741 for (i = 0; i < ARRAY_SIZE(regs_to_store); i++) 1867 for (i = 0; i < regs_amount; i++)
1742 b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]); 1868 b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
1743 1869
1744 /* Store for future configuration */ 1870 /* Store for future configuration */
@@ -2494,8 +2620,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2494 struct ssb_sprom *sprom = dev->dev->bus_sprom; 2620 struct ssb_sprom *sprom = dev->dev->bus_sprom;
2495 2621
2496 /* TX to RX */ 2622 /* TX to RX */
2497 u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F }; 2623 u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
2498 u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 }; 2624 u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
2499 /* RX to TX */ 2625 /* RX to TX */
2500 u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3, 2626 u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
2501 0x1F }; 2627 0x1F };
@@ -2503,6 +2629,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2503 u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F }; 2629 u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
2504 u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 }; 2630 u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
2505 2631
2632 u16 vmids[5][4] = {
2633 { 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
2634 { 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
2635 { 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
2636 { 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
2637 { 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
2638 };
2639 u16 gains[5][4] = {
2640 { 0x02, 0x02, 0x02, 0x00, }, /* 0 */
2641 { 0x02, 0x02, 0x02, 0x02, }, /* 1 */
2642 { 0x02, 0x02, 0x02, 0x04, }, /* 2 */
2643 { 0x02, 0x02, 0x02, 0x00, }, /* 3 */
2644 { 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
2645 };
2646 u16 *vmid, *gain;
2647
2648 u8 pdet_range;
2506 u16 tmp16; 2649 u16 tmp16;
2507 u32 tmp32; 2650 u32 tmp32;
2508 2651
@@ -2561,7 +2704,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2561 b43_ntab_write(dev, B43_NTAB16(8, 0), 2); 2704 b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
2562 b43_ntab_write(dev, B43_NTAB16(8, 16), 2); 2705 b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
2563 2706
2564 /* TODO */ 2707 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
2708 pdet_range = sprom->fem.ghz2.pdet_range;
2709 else
2710 pdet_range = sprom->fem.ghz5.pdet_range;
2711 vmid = vmids[min_t(u16, pdet_range, 4)];
2712 gain = gains[min_t(u16, pdet_range, 4)];
2713 switch (pdet_range) {
2714 case 3:
2715 if (!(dev->phy.rev >= 4 &&
2716 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
2717 break;
2718 /* FALL THROUGH */
2719 case 0:
2720 case 1:
2721 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
2722 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
2723 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
2724 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
2725 break;
2726 case 2:
2727 if (dev->phy.rev >= 6) {
2728 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
2729 vmid[3] = 0x94;
2730 else
2731 vmid[3] = 0x8e;
2732 gain[3] = 3;
2733 } else if (dev->phy.rev == 5) {
2734 vmid[3] = 0x84;
2735 gain[3] = 2;
2736 }
2737 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
2738 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
2739 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
2740 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
2741 break;
2742 case 4:
2743 case 5:
2744 if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
2745 if (pdet_range == 4) {
2746 vmid[3] = 0x8e;
2747 tmp16 = 0x96;
2748 gain[3] = 0x2;
2749 } else {
2750 vmid[3] = 0x89;
2751 tmp16 = 0x89;
2752 gain[3] = 0;
2753 }
2754 } else {
2755 if (pdet_range == 4) {
2756 vmid[3] = 0x89;
2757 tmp16 = 0x8b;
2758 gain[3] = 0x2;
2759 } else {
2760 vmid[3] = 0x74;
2761 tmp16 = 0x70;
2762 gain[3] = 0;
2763 }
2764 }
2765 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
2766 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
2767 vmid[3] = tmp16;
2768 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
2769 b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
2770 break;
2771 }
2565 2772
2566 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00); 2773 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
2567 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00); 2774 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
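This fills in the long-standing "TODO" in the rev3+ workarounds: the per-band SPROM pdet_range now selects a Vmid/gain row, clamped with min_t() so ranges 4 and 5 share the last row, and the case bodies patch entry 3 before the bulk table writes. The selection, condensed from the hunk above:

	pdet_range = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
		     sprom->fem.ghz2.pdet_range : sprom->fem.ghz5.pdet_range;
	vmid = vmids[min_t(u16, pdet_range, 4)];
	gain = gains[min_t(u16, pdet_range, 4)];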
@@ -2600,7 +2807,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2600 /* Dropped probably-always-true condition */ 2807 /* Dropped probably-always-true condition */
2601 b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb); 2808 b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
2602 b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb); 2809 b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
2603 b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341); 2810 b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
2604 b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341); 2811 b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
2605 b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b); 2812 b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
2606 b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b); 2813 b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@@ -3211,6 +3418,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
3211 u8 idx, delta; 3418 u8 idx, delta;
3212 u8 i, stf_mode; 3419 u8 i, stf_mode;
3213 3420
3421 /* Array adj_pwr_tbl corresponds to the hardware table. It consists of
3422 * 21 groups, each containing 4 entries.
3423 *
3424 * The first group has entries for CCK modulation.
3425 * The remaining groups have one entry per STF mode (SISO, CDD, STBC, SDM).
3426 *
3427 * Group 0 is for CCK
3428 * Groups 1..4 use BPSK (group per coding rate)
3429 * Groups 5..8 use QPSK (group per coding rate)
3430 * Groups 9..12 use 16-QAM (group per coding rate)
3431 * Groups 13..16 use 64-QAM (group per coding rate)
3432 * Groups 17..20 are unknown
3433 */
3434
3214 for (i = 0; i < 4; i++) 3435 for (i = 0; i < 4; i++)
3215 nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i]; 3436 nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
3216 3437
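Given the layout documented in the comment above (21 groups of 4 entries, i.e. the 84 entries matching the bulk writes re-enabled below), the entry index for a group/mode pair would be computed as follows. This is a sketch based on a reading of the comment, not code from the patch:

	/* group 0 = CCK; groups 1..20 = modulation/rate groups;
	 * stf_mode 0..3 = SISO, CDD, STBC, SDM (assumed ordering) */
	static inline unsigned int adj_pwr_index(unsigned int group,
						 unsigned int stf_mode)
	{
		return group * 4 + stf_mode;
	}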
@@ -3409,10 +3630,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
3409 } 3630 }
3410 3631
3411 b43_nphy_tx_prepare_adjusted_power_table(dev); 3632 b43_nphy_tx_prepare_adjusted_power_table(dev);
3412 /*
3413 b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl); 3633 b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
3414 b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl); 3634 b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
3415 */
3416 3635
3417 if (nphy->hang_avoid) 3636 if (nphy->hang_avoid)
3418 b43_nphy_stay_in_carrier_search(dev, false); 3637 b43_nphy_stay_in_carrier_search(dev, false);
@@ -5124,7 +5343,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
5124 b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015); 5343 b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
5125 b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320); 5344 b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
5126 if (phy->rev >= 3 && phy->rev <= 6) 5345 if (phy->rev >= 3 && phy->rev <= 6)
5127 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014); 5346 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
5128 b43_nphy_tx_lp_fbw(dev); 5347 b43_nphy_tx_lp_fbw(dev);
5129 if (phy->rev >= 3) 5348 if (phy->rev >= 3)
5130 b43_nphy_spur_workaround(dev); 5349 b43_nphy_spur_workaround(dev);
@@ -5338,7 +5557,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
5338 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); 5557 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
5339 nphy->spur_avoid = (phy->rev >= 3) ? 5558 nphy->spur_avoid = (phy->rev >= 3) ?
5340 B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; 5559 B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
5341 nphy->init_por = true;
5342 nphy->gain_boost = true; /* this way we follow wl, assume it is true */ 5560 nphy->gain_boost = true; /* this way we follow wl, assume it is true */
5343 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ 5561 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
5344 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ 5562 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -5379,8 +5597,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
5379 nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; 5597 nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
5380 nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; 5598 nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
5381 } 5599 }
5382
5383 nphy->init_por = true;
5384} 5600}
5385 5601
5386static void b43_nphy_op_free(struct b43_wldev *dev) 5602static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -5441,8 +5657,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
5441{ 5657{
5442 /* Register 1 is a 32-bit register. */ 5658 /* Register 1 is a 32-bit register. */
5443 B43_WARN_ON(reg == 1); 5659 B43_WARN_ON(reg == 1);
5444	/* N-PHY needs 0x100 for read access */
5445	reg |= 0x100;
5660
5661	if (dev->phy.rev >= 7)
5662		reg |= 0x200; /* Radio 0x2057 */
5663	else
5664		reg |= 0x100;
5446 5665
5447 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); 5666 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
5448 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); 5667 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
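A minimal standalone sketch of the read-address selection introduced above, assuming only what the hunk shows (the helper name is hypothetical, not a driver function):

	#include <stdint.h>

	/* PHY rev >= 7 boards carry the 0x2057 radio and need 0x200 ORed
	 * into the address for reads; older N-PHYs keep the 0x100 flag. */
	static uint16_t nphy_radio_read_addr(unsigned int phy_rev, uint16_t reg)
	{
		if (phy_rev >= 7)
			return reg | 0x200;	/* radio 0x2057 */
		return reg | 0x100;		/* pre-rev7 N-PHY read access */
	}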
@@ -5488,10 +5707,12 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
5488 } 5707 }
5489 } else { 5708 } else {
5490 if (dev->phy.rev >= 7) { 5709 if (dev->phy.rev >= 7) {
5491	b43_radio_2057_init(dev);
5710	if (!dev->phy.radio_on)
5711		b43_radio_2057_init(dev);
5492 b43_switch_channel(dev, dev->phy.channel); 5712 b43_switch_channel(dev, dev->phy.channel);
5493 } else if (dev->phy.rev >= 3) { 5713 } else if (dev->phy.rev >= 3) {
5494	b43_radio_init2056(dev);
5714	if (!dev->phy.radio_on)
5715		b43_radio_init2056(dev);
5495 b43_switch_channel(dev, dev->phy.channel); 5716 b43_switch_channel(dev, dev->phy.channel);
5496 } else { 5717 } else {
5497 b43_radio_init2055(dev); 5718 b43_radio_init2055(dev);
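The guards added above skip the full radio init when the radio is already powered on and only redo the channel switch. A simplified sketch of the pattern, with stand-in types and callbacks instead of struct b43_wldev and the real init functions:

	/* Stand-in types; the real code checks dev->phy.radio_on and calls
	 * b43_radio_2057_init()/b43_radio_init2056() directly. */
	struct phy_state {
		int radio_on;
		int channel;
	};

	static void phy_rf_enable(struct phy_state *phy,
				  void (*radio_init)(void),
				  void (*switch_channel)(int channel))
	{
		if (!phy->radio_on)
			radio_init();		/* expensive init only once */
		switch_channel(phy->channel);	/* always retune */
	}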
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 9a5b6bc27d24..ecfbf66dbc3b 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -931,7 +931,6 @@ struct b43_phy_n {
931 u16 papd_epsilon_offset[2]; 931 u16 papd_epsilon_offset[2];
932 s32 preamble_override; 932 s32 preamble_override;
933 u32 bb_mult_save; 933 u32 bb_mult_save;
934 bool init_por;
935 934
936 bool gain_boost; 935 bool gain_boost;
937 bool elna_gain_config; 936 bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index b4fd9345d673..2ce25607c60d 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
48 unsigned int rx_length; 48 unsigned int rx_length;
49}; 49};
50 50
51static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = { 51static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
52 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 52 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
53 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 53 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
54 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 54 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
232 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 232 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
233}; 233};
234 234
235static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = { 235static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
236 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 236 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
237 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 237 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
238 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 238 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
380 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 380 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
381}; 381};
382 382
383static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = { 383static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
384 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 384 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
385 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 385 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
386 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 386 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
530 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 530 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
531}; 531};
532 532
533static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = { 533static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
534 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 534 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
535 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 535 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
536 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 536 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
714 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 714 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
715}; 715};
716 716
717static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = { 717static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
718 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 718 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
719 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 719 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
720 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 720 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
862 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 862 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
863}; 863};
864 864
865static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = { 865static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
866 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 866 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
867 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 867 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
868 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 868 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
1012 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1012 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1013}; 1013};
1014 1014
1015static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = { 1015static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
1016 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1016 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1017 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1017 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1018 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1018 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
1196 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1196 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1197}; 1197};
1198 1198
1199static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = { 1199static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
1200 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1200 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1201 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1201 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1202 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1202 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
1352 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, }, 1352 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
1353}; 1353};
1354 1354
1355static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = { 1355static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
1356 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1356 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1357 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1357 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1358 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1358 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
1502 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1502 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1503}; 1503};
1504 1504
1505static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = { 1505static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
1506 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1506 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1507 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1507 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1508 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1508 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
1686 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1686 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1687}; 1687};
1688 1688
1689static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = { 1689static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
1690 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1690 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1691 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1691 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1692 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1692 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
1842 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, }, 1842 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1843}; 1843};
1844 1844
1845static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = { 1845static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
1846 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1846 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1847 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1847 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1848 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1848 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
1992 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1992 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1993}; 1993};
1994 1994
1995static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = { 1995static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
1996 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1996 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1997 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1997 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1998 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 1998 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
2176 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2176 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2177}; 2177};
2178 2178
2179static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = { 2179static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
2180 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2180 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2181 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2181 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2182 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2182 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
2332 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, }, 2332 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
2333}; 2333};
2334 2334
2335static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = { 2335static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
2336 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2336 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2337 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2337 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2338 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2338 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
2482 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2482 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2483}; 2483};
2484 2484
2485static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = { 2485static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
2486 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2486 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2487 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2487 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2488 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2488 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
2666 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2666 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2667}; 2667};
2668 2668
2669static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = { 2669static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
2670 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2670 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2671 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2671 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2672 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2672 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
2822 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, }, 2822 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2823}; 2823};
2824 2824
2825static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = { 2825static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
2826 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2826 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2827 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2827 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2828 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2828 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
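All of the tables renamed in this file share one entry shape: a 5 GHz value, a 2 GHz value, and an upload marker. A standalone sketch of how such a table is typically consumed, with illustrative field and flag names (the driver's real flag constants differ):

	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative entry layout and consumer: only entries flagged for
	 * upload are written, using the value for the current band. */
	struct inittab_entry {
		uint16_t ghz5;
		uint16_t ghz2;
		uint8_t flags;
	};
	#define ENTRY_UPLOAD 0x01 /* hypothetical flag, stands in for UPLOAD */

	static void upload_inittab(const struct inittab_entry *tab, size_t len,
				   int is_5ghz,
				   void (*radio_write)(uint16_t reg, uint16_t val))
	{
		size_t i;

		for (i = 0; i < len; i++) {
			if (!(tab[i].flags & ENTRY_UPLOAD))
				continue;
			radio_write((uint16_t)i,
				    is_5ghz ? tab[i].ghz5 : tab[i].ghz2);
		}
	}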
@@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
2972 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, 2972 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2973}; 2973};
2974 2974
2975#define INITTABSPTS(prefix) \
2976	.syn = prefix##_syn, \
2977	.syn_length = ARRAY_SIZE(prefix##_syn), \
2978	.tx = prefix##_tx, \
2979	.tx_length = ARRAY_SIZE(prefix##_tx), \
2980	.rx = prefix##_rx, \
2981	.rx_length = ARRAY_SIZE(prefix##_rx)
2982
2983static const struct b2056_inittabs_pts b2056_inittabs[] = {
2984	[3] = { INITTABSPTS(b2056_inittab_rev3) },
2985	[4] = { INITTABSPTS(b2056_inittab_rev4) },
2986	[5] = { INITTABSPTS(b2056_inittab_rev5) },
2987	[6] = { INITTABSPTS(b2056_inittab_rev6) },
2988	[7] = { INITTABSPTS(b2056_inittab_rev7) },
2989	[8] = { INITTABSPTS(b2056_inittab_rev8) },
2990	[9] = { INITTABSPTS(b2056_inittab_rev7) },
2991};
2992
2975static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
2976	[B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
2977	[B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
2978	[B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
2979	[B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
2980	[B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
2981	[B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
2982	[B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
2983	[B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
2984};
2985
2986static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
2987	[B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
2988	[B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
2989	[B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
2990	[B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2991	[B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2992	[B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2993	[B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
2994	[B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
2995	[B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2996	[B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
2997	[B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
2998	[B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
2999	[B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
3000};
3001
3002static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
3003	[B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
3004	[B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
3005	[B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
3006	[B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
3007	[B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
3008	[B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
3009	[B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
3010	[B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
3011	[B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
3012	[B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
3013	[B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
3014	[B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
3015	[B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
3016	[B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
3017	[B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
3018};
3019
3020#define INITTABSPTS(prefix) \
3021 static const struct b2056_inittabs_pts prefix = { \
3022 .syn = prefix##_syn, \
3023 .syn_length = ARRAY_SIZE(prefix##_syn), \
3024 .tx = prefix##_tx, \
3025 .tx_length = ARRAY_SIZE(prefix##_tx), \
3026 .rx = prefix##_rx, \
3027 .rx_length = ARRAY_SIZE(prefix##_rx), \
3028 }
3029
3030INITTABSPTS(b2056_inittab_phy_rev3);
3031INITTABSPTS(b2056_inittab_phy_rev4);
3032INITTABSPTS(b2056_inittab_radio_rev5);
3033INITTABSPTS(b2056_inittab_radio_rev6);
3034INITTABSPTS(b2056_inittab_radio_rev7_9);
3035INITTABSPTS(b2056_inittab_radio_rev8);
3036INITTABSPTS(b2056_inittab_radio_rev11);
3037
2993#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \ 3038#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
2994 r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \ 3039 r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
2995 r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \ 3040 r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
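The reworked INITTABSPTS macro now emits a complete b2056_inittabs_pts object rather than an initializer fragment: INITTABSPTS(b2056_inittab_phy_rev3), for example, defines a static const struct b2056_inittabs_pts named b2056_inittab_phy_rev3 whose .syn/.tx/.rx members point at the three same-prefix arrays. With the old rev-indexed b2056_inittabs[] array removed, callers must dispatch on PHY revision for the two PHY-keyed sets and on radio revision for the rest; a hedged sketch of such a selector (the actual lookup function is outside this hunk, so treat this only as a plausible shape for it):

	/* Hypothetical selector matching the table names above: PHY revs 3
	 * and 4 are keyed by PHY revision, newer devices by the 2056 radio
	 * revision (5, 6, 7/9, 8, 11). */
	static const struct b2056_inittabs_pts *
	b2056_pick_inittabs(unsigned int phy_rev, unsigned int radio_rev)
	{
		switch (phy_rev) {
		case 3:
			return &b2056_inittab_phy_rev3;
		case 4:
			return &b2056_inittab_phy_rev4;
		default:
			break;
		}

		switch (radio_rev) {
		case 5:
			return &b2056_inittab_radio_rev5;
		case 6:
			return &b2056_inittab_radio_rev6;
		case 7:
		case 9:
			return &b2056_inittab_radio_rev7_9;
		case 8:
			return &b2056_inittab_radio_rev8;
		case 11:
			return &b2056_inittab_radio_rev11;
		}

		return NULL;	/* unsupported revision */
	}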
@@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
3041 .phy_regs.phy_bw6 = r5 3086 .phy_regs.phy_bw6 = r5
3042 3087
3043/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */ 3088/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
3044static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = { 3089static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
3045 { .freq = 4920, 3090 { .freq = 4920,
3046 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 3091 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
3047 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 3092 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
4036 }, 4081 },
4037}; 4082};
4038 4083
4039static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = { 4084static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
4040 { .freq = 4920, 4085 { .freq = 4920,
4041 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 4086 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
4042 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 4087 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
5031 }, 5076 },
5032}; 5077};
5033 5078
5034static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = { 5079static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
5035 { .freq = 4920, 5080 { .freq = 4920,
5036 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 5081 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
5037 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 5082 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
6026 }, 6071 },
6027}; 6072};
6028 6073
6029static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = { 6074static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
6030 { .freq = 4920, 6075 { .freq = 4920,
6031 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 6076 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
6032 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 6077 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
7021 }, 7066 },
7022}; 7067};
7023 7068
7024static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = { 7069static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
7025 { .freq = 4920, 7070 { .freq = 4920,
7026 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 7071 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
7027 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 7072 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
8016 }, 8061 },
8017}; 8062};
8018 8063
8019static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = { 8064static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
8020 { .freq = 4920, 8065 { .freq = 4920,
8021 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 8066 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
8022 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 8067 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
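Each of these channel tables, including the new radio-rev11 one added below, is keyed by center frequency in MHz, so tuning resolves to finding the entry whose .freq matches. A standalone sketch of that lookup, with the entry trimmed to the key field (the driver's b43_nphy_channeltab_entry_rev3 also carries the RADIOREGS3/PHYREGS payloads):

	#include <stddef.h>

	/* Trimmed-down entry: the real struct also holds the radio and PHY
	 * register values shown in the tables. */
	struct chantab_entry {
		unsigned int freq;	/* center frequency in MHz */
	};

	static const struct chantab_entry *
	chantab_find(const struct chantab_entry *tab, size_t len,
		     unsigned int freq)
	{
		size_t i;

		for (i = 0; i < len; i++)
			if (tab[i].freq == freq)
				return &tab[i];
		return NULL;	/* not supported by this radio revision */
	}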
@@ -9011,6 +9056,1154 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
9011 }, 9056 },
9012}; 9057};
9013 9058
9059static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
9060 {
9061 .freq = 4920,
9062 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x02,
9063 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
9064 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9065 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9066 0x00, 0x0f, 0x00, 0x6f, 0x00),
9067 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
9068 },
9069 {
9070 .freq = 4930,
9071 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x02,
9072 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
9073 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9074 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9075 0x00, 0x0f, 0x00, 0x6f, 0x00),
9076 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
9077 },
9078 {
9079 .freq = 4940,
9080 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x02,
9081 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
9082 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9083 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9084 0x00, 0x0f, 0x00, 0x6f, 0x00),
9085 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
9086 },
9087 {
9088 .freq = 4950,
9089 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x02,
9090 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
9091 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9092 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9093 0x00, 0x0f, 0x00, 0x6f, 0x00),
9094 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
9095 },
9096 {
9097 .freq = 4960,
9098 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x02,
9099 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9100 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9101 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9102 0x00, 0x0f, 0x00, 0x6f, 0x00),
9103 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
9104 },
9105 {
9106 .freq = 4970,
9107 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x02,
9108 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9109 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9110 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9111 0x00, 0x0f, 0x00, 0x6f, 0x00),
9112 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
9113 },
9114 {
9115 .freq = 4980,
9116 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x02,
9117 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9118 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9119 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9120 0x00, 0x0f, 0x00, 0x6f, 0x00),
9121 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
9122 },
9123 {
9124 .freq = 4990,
9125 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x02,
9126 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9127 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9128 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9129 0x00, 0x0f, 0x00, 0x6f, 0x00),
9130 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
9131 },
9132 {
9133 .freq = 5000,
9134 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x02,
9135 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9136 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9137 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9138 0x00, 0x0f, 0x00, 0x6f, 0x00),
9139 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
9140 },
9141 {
9142 .freq = 5010,
9143 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x02,
9144 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9145 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9146 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9147 0x00, 0x0f, 0x00, 0x6f, 0x00),
9148 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
9149 },
9150 {
9151 .freq = 5020,
9152 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x02,
9153 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9154 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9155 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9156 0x00, 0x0f, 0x00, 0x6f, 0x00),
9157 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
9158 },
9159 {
9160 .freq = 5030,
9161 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x02,
9162 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9163 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9164 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9165 0x00, 0x0f, 0x00, 0x6f, 0x00),
9166 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
9167 },
9168 {
9169 .freq = 5040,
9170 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x02,
9171 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9172 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9173 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9174 0x00, 0x0f, 0x00, 0x6f, 0x00),
9175 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
9176 },
9177 {
9178 .freq = 5050,
9179 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x02,
9180 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9181 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9182 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9183 0x00, 0x0f, 0x00, 0x6f, 0x00),
9184 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
9185 },
9186 {
9187 .freq = 5060,
9188 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x02,
9189 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9190 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9191 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
9192 0x00, 0x0f, 0x00, 0x6f, 0x00),
9193 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
9194 },
9195 {
9196 .freq = 5070,
9197 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x02,
9198 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9199 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9200 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
9201 0x00, 0x0f, 0x00, 0x6f, 0x00),
9202 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
9203 },
9204 {
9205 .freq = 5080,
9206 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x02,
9207 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9208 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9209 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
9210 0x00, 0x0f, 0x00, 0x6f, 0x00),
9211 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
9212 },
9213 {
9214 .freq = 5090,
9215 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x02,
9216 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
9217 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
9218 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
9219 0x00, 0x0f, 0x00, 0x6f, 0x00),
9220 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
9221 },
9222 {
9223 .freq = 5100,
9224 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x02,
9225 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9226 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
9227 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
9228 0x00, 0x0f, 0x00, 0x6f, 0x00),
9229 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
9230 },
9231 {
9232 .freq = 5110,
9233 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x02,
9234 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9235 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
9236 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
9237 0x00, 0x0f, 0x00, 0x6f, 0x00),
9238 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
9239 },
9240 {
9241 .freq = 5120,
9242 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x02,
9243 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9244 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
9245 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
9246 0x00, 0x0f, 0x00, 0x6f, 0x00),
9247 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
9248 },
9249 {
9250 .freq = 5130,
9251 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x02,
9252 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9253 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
9254 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
9255 0x00, 0x0f, 0x00, 0x6f, 0x00),
9256 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
9257 },
9258 {
9259 .freq = 5140,
9260 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x02,
9261 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9262 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
9263 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
9264 0x00, 0x0f, 0x00, 0x6f, 0x00),
9265 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
9266 },
9267 {
9268 .freq = 5160,
9269 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x02,
9270 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9271 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
9272 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
9273 0x00, 0x0e, 0x00, 0x6f, 0x00),
9274 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
9275 },
9276 {
9277 .freq = 5170,
9278 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x02,
9279 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9280 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
9281 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
9282 0x00, 0x0e, 0x00, 0x6f, 0x00),
9283 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
9284 },
9285 {
9286 .freq = 5180,
9287 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
9288 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9289 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
9290 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
9291 0x00, 0x0e, 0x00, 0x6f, 0x00),
9292 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
9293 },
9294 {
9295 .freq = 5190,
9296 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x02,
9297 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9298 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
9299 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
9300 0x00, 0x0d, 0x00, 0x6f, 0x00),
9301 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
9302 },
9303 {
9304 .freq = 5200,
9305 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
9306 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9307 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
9308 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
9309 0x00, 0x0d, 0x00, 0x6f, 0x00),
9310 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
9311 },
9312 {
9313 .freq = 5210,
9314 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x02,
9315 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
9316 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
9317 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
9318 0x00, 0x0d, 0x00, 0x6f, 0x00),
9319 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
9320 },
9321 {
9322 .freq = 5220,
9323 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
9324 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
9325 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
9326 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
9327 0x00, 0x0d, 0x00, 0x6f, 0x00),
9328 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
9329 },
9330 {
9331 .freq = 5230,
9332 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x02,
9333 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
9334 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
9335 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
9336 0x00, 0x0d, 0x00, 0x6f, 0x00),
9337 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
9338 },
9339 {
9340 .freq = 5240,
9341 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x02,
9342 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
9343 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
9344 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
9345 0x00, 0x0d, 0x00, 0x6f, 0x00),
9346 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
9347 },
9348 {
9349 .freq = 5250,
9350 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x02,
9351 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
9352 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
9353 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
9354 0x00, 0x0d, 0x00, 0x6f, 0x00),
9355 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
9356 },
9357 {
9358 .freq = 5260,
9359 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x02,
9360 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
9361 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
9362 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
9363 0x00, 0x0d, 0x00, 0x6f, 0x00),
9364 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
9365 },
9366 {
9367 .freq = 5270,
9368 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x02,
9369 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
9370 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
9371 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
9372 0x00, 0x0c, 0x00, 0x6f, 0x00),
9373 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
9374 },
9375 {
9376 .freq = 5280,
9377 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x02,
9378 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
9379 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
9380 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
9381 0x00, 0x0c, 0x00, 0x6f, 0x00),
9382 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
9383 },
9384 {
9385 .freq = 5290,
9386 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x02,
9387 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
9388 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
9389 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
9390 0x00, 0x0c, 0x00, 0x6f, 0x00),
9391 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
9392 },
9393 {
9394 .freq = 5300,
9395 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x02,
9396 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
9397 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
9398 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
9399 0x00, 0x0c, 0x00, 0x6f, 0x00),
9400 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
9401 },
9402 {
9403 .freq = 5310,
9404 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x02,
9405 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
9406 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
9407 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
9408 0x00, 0x0c, 0x00, 0x6f, 0x00),
9409 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
9410 },
9411 {
9412 .freq = 5320,
9413 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x02,
9414 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
9415 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
9416 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
9417 0x00, 0x0c, 0x00, 0x6f, 0x00),
9418 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
9419 },
9420 {
9421 .freq = 5330,
9422 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x02,
9423 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
9424 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
9425 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
9426 0x00, 0x0b, 0x00, 0x6f, 0x00),
9427 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
9428 },
9429 {
9430 .freq = 5340,
9431 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x02,
9432 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
9433 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
9434 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
9435 0x00, 0x0b, 0x00, 0x6f, 0x00),
9436 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
9437 },
9438 {
9439 .freq = 5350,
9440 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x02,
9441 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
9442 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
9443 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
9444 0x00, 0x0b, 0x00, 0x6f, 0x00),
9445 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
9446 },
9447 {
9448 .freq = 5360,
9449 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x02,
9450 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
9451 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
9452 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
9453 0x00, 0x0a, 0x00, 0x6f, 0x00),
9454 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
9455 },
9456 {
9457 .freq = 5370,
9458 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x02,
9459 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
9460 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
9461 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
9462 0x00, 0x0a, 0x00, 0x6f, 0x00),
9463 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
9464 },
9465 {
9466 .freq = 5380,
9467 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x02,
9468 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
9469 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
9470 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
9471 0x00, 0x0a, 0x00, 0x6f, 0x00),
9472 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
9473 },
9474 {
9475 .freq = 5390,
9476 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x02,
9477 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
9478 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
9479 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
9480 0x00, 0x0a, 0x00, 0x6f, 0x00),
9481 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
9482 },
9483 {
9484 .freq = 5400,
9485 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x02,
9486 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
9487 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
9488 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
9489 0x00, 0x0a, 0x00, 0x6f, 0x00),
9490 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
9491 },
9492 {
9493 .freq = 5410,
9494 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x02,
9495 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
9496 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
9497 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
9498 0x00, 0x0a, 0x00, 0x6f, 0x00),
9499 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
9500 },
9501 {
9502 .freq = 5420,
9503 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x02,
9504 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
9505 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
9506 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
9507 0x00, 0x0a, 0x00, 0x6f, 0x00),
9508 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
9509 },
9510 {
9511 .freq = 5430,
9512 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x02,
9513 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
9514 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
9515 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
9516 0x00, 0x0a, 0x00, 0x6f, 0x00),
9517 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
9518 },
9519 {
9520 .freq = 5440,
9521 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x02,
9522 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
9523 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
9524 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
9525 0x00, 0x09, 0x00, 0x6f, 0x00),
9526 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
9527 },
9528 {
9529 .freq = 5450,
9530 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x02,
9531 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
9532 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
9533 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
9534 0x00, 0x09, 0x00, 0x6f, 0x00),
9535 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
9536 },
9537 {
9538 .freq = 5460,
9539 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x02,
9540 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
9541 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
9542 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
9543 0x00, 0x09, 0x00, 0x6f, 0x00),
9544 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
9545 },
9546 {
9547 .freq = 5470,
9548 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x02,
9549 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
9550 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
9551 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
9552 0x00, 0x09, 0x00, 0x6f, 0x00),
9553 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
9554 },
9555 {
9556 .freq = 5480,
9557 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x02,
9558 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
9559 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9560 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9561 0x00, 0x09, 0x00, 0x6f, 0x00),
9562 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
9563 },
9564 {
9565 .freq = 5490,
9566 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x02,
9567 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
9568 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9569 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9570 0x00, 0x09, 0x00, 0x6f, 0x00),
9571 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
9572 },
9573 {
9574 .freq = 5500,
9575 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x02,
9576 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
9577 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9578 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9579 0x00, 0x09, 0x00, 0x6f, 0x00),
9580 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
9581 },
9582 {
9583 .freq = 5510,
9584 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x02,
9585 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
9586 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9587 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9588 0x00, 0x09, 0x00, 0x6f, 0x00),
9589 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
9590 },
9591 {
9592 .freq = 5520,
9593 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x02,
9594 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
9595 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9596 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9597 0x00, 0x09, 0x00, 0x6f, 0x00),
9598 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
9599 },
9600 {
9601 .freq = 5530,
9602 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x02,
9603 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
9604 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9605 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9606 0x00, 0x09, 0x00, 0x6f, 0x00),
9607 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
9608 },
9609 {
9610 .freq = 5540,
9611 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x02,
9612 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
9613 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9614 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9615 0x00, 0x09, 0x00, 0x6f, 0x00),
9616 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
9617 },
9618 {
9619 .freq = 5550,
9620 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x02,
9621 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
9622 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9623 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9624 0x00, 0x09, 0x00, 0x6f, 0x00),
9625 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
9626 },
9627 {
9628 .freq = 5560,
9629 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x02,
9630 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
9631 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9632 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
9633 0x00, 0x09, 0x00, 0x6f, 0x00),
9634 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
9635 },
9636 {
9637 .freq = 5570,
9638 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x02,
9639 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
9640 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
9641 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
9642 0x00, 0x09, 0x00, 0x6f, 0x00),
9643 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
9644 },
9645 {
9646 .freq = 5580,
9647 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x02,
9648 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
9649 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
9650 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
9651 0x00, 0x08, 0x00, 0x6f, 0x00),
9652 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
9653 },
9654 {
9655 .freq = 5590,
9656 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x02,
9657 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
9658 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
9659 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
9660 0x00, 0x08, 0x00, 0x6f, 0x00),
9661 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
9662 },
9663 {
9664 .freq = 5600,
9665 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x02,
9666 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
9667 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
9668 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
9669 0x00, 0x08, 0x00, 0x6f, 0x00),
9670 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
9671 },
9672 {
9673 .freq = 5610,
9674 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x02,
9675 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
9676 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
9677 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
9678 0x00, 0x08, 0x00, 0x6f, 0x00),
9679 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
9680 },
9681 {
9682 .freq = 5620,
9683 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x02,
9684 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
9685 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
9686 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
9687 0x00, 0x07, 0x00, 0x6f, 0x00),
9688 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
9689 },
9690 {
9691 .freq = 5630,
9692 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x02,
9693 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
9694 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
9695 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
9696 0x00, 0x07, 0x00, 0x6f, 0x00),
9697 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
9698 },
9699 {
9700 .freq = 5640,
9701 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x02,
9702 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
9703 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
9704 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
9705 0x00, 0x07, 0x00, 0x6f, 0x00),
9706 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
9707 },
9708 {
9709 .freq = 5650,
9710 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x02,
9711 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
9712 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
9713 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
9714 0x00, 0x07, 0x00, 0x6f, 0x00),
9715 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
9716 },
9717 {
9718 .freq = 5660,
9719 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x02,
9720 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
9721 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9722 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
9723 0x00, 0x06, 0x00, 0x6f, 0x00),
9724 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
9725 },
9726 {
9727 .freq = 5670,
9728 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x02,
9729 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
9730 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9731 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9732 0x00, 0x06, 0x00, 0x6f, 0x00),
9733 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
9734 },
9735 {
9736 .freq = 5680,
9737 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x02,
9738 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
9739 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9740 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9741 0x00, 0x06, 0x00, 0x6f, 0x00),
9742 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
9743 },
9744 {
9745 .freq = 5690,
9746 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x02,
9747 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
9748 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9749 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9750 0x00, 0x06, 0x00, 0x6f, 0x00),
9751 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
9752 },
9753 {
9754 .freq = 5700,
9755 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x02,
9756 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
9757 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9758 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9759 0x00, 0x06, 0x00, 0x6e, 0x00),
9760 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
9761 },
9762 {
9763 .freq = 5710,
9764 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x02,
9765 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
9766 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9767 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9768 0x00, 0x06, 0x00, 0x6e, 0x00),
9769 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
9770 },
9771 {
9772 .freq = 5720,
9773 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x02,
9774 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
9775 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9776 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9777 0x00, 0x06, 0x00, 0x6e, 0x00),
9778 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
9779 },
9780 {
9781 .freq = 5725,
9782 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x05, 0x05, 0x02,
9783 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
9784 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9785 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9786 0x00, 0x06, 0x00, 0x6e, 0x00),
9787 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
9788 },
9789 {
9790 .freq = 5730,
9791 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x02,
9792 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
9793 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9794 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9795 0x00, 0x06, 0x00, 0x6e, 0x00),
9796 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
9797 },
9798 {
9799 .freq = 5735,
9800 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x05, 0x05, 0x02,
9801 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
9802 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9803 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9804 0x00, 0x06, 0x00, 0x6d, 0x00),
9805 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
9806 },
9807 {
9808 .freq = 5740,
9809 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x02,
9810 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
9811 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9812 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9813 0x00, 0x06, 0x00, 0x6d, 0x00),
9814 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
9815 },
9816 {
9817 .freq = 5745,
9818 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
9819 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
9820 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
9821 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
9822 0x00, 0x06, 0x00, 0x6d, 0x00),
9823 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
9824 },
9825 {
9826 .freq = 5750,
9827 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x02,
9828 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
9829 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9830 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
9831 0x00, 0x05, 0x00, 0x6d, 0x00),
9832 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
9833 },
9834 {
9835 .freq = 5755,
9836 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x05, 0x05, 0x02,
9837 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
9838 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9839 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
9840 0x00, 0x05, 0x00, 0x6c, 0x00),
9841 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
9842 },
9843 {
9844 .freq = 5760,
9845 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x02,
9846 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
9847 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9848 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
9849 0x00, 0x05, 0x00, 0x6c, 0x00),
9850 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
9851 },
9852 {
9853 .freq = 5765,
9854 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
9855 0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
9856 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9857 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
9858 0x00, 0x05, 0x00, 0x6c, 0x00),
9859 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
9860 },
9861 {
9862 .freq = 5770,
9863 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x02,
9864 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
9865 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9866 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
9867 0x00, 0x05, 0x00, 0x6b, 0x00),
9868 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
9869 },
9870 {
9871 .freq = 5775,
9872 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x05, 0x05, 0x02,
9873 0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
9874 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9875 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
9876 0x00, 0x05, 0x00, 0x6b, 0x00),
9877 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
9878 },
9879 {
9880 .freq = 5780,
9881 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x02,
9882 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
9883 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9884 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
9885 0x00, 0x05, 0x00, 0x6b, 0x00),
9886 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
9887 },
9888 {
9889 .freq = 5785,
9890 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
9891 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9892 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9893 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
9894 0x00, 0x05, 0x00, 0x6b, 0x00),
9895 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
9896 },
9897 {
9898 .freq = 5790,
9899 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x02,
9900 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9901 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9902 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
9903 0x00, 0x05, 0x00, 0x6b, 0x00),
9904 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
9905 },
9906 {
9907 .freq = 5795,
9908 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x05, 0x05, 0x02,
9909 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9910 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9911 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9912 0x00, 0x05, 0x00, 0x6b, 0x00),
9913 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
9914 },
9915 {
9916 .freq = 5800,
9917 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x02,
9918 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9919 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9920 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9921 0x00, 0x05, 0x00, 0x6b, 0x00),
9922 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
9923 },
9924 {
9925 .freq = 5805,
9926 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
9927 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9928 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9929 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9930 0x00, 0x05, 0x00, 0x6a, 0x00),
9931 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
9932 },
9933 {
9934 .freq = 5810,
9935 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x02,
9936 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9937 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9938 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9939 0x00, 0x05, 0x00, 0x6a, 0x00),
9940 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
9941 },
9942 {
9943 .freq = 5815,
9944 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x05, 0x05, 0x02,
9945 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9946 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9947 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9948 0x00, 0x05, 0x00, 0x6a, 0x00),
9949 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
9950 },
9951 {
9952 .freq = 5820,
9953 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x02,
9954 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9955 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9956 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9957 0x00, 0x05, 0x00, 0x6a, 0x00),
9958 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
9959 },
9960 {
9961 .freq = 5825,
9962 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
9963 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9964 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9965 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9966 0x00, 0x05, 0x00, 0x69, 0x00),
9967 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
9968 },
9969 {
9970 .freq = 5830,
9971 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x02,
9972 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9973 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
9974 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9975 0x00, 0x05, 0x00, 0x69, 0x00),
9976 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
9977 },
9978 {
9979 .freq = 5840,
9980 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x02,
9981 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
9982 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
9983 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9984 0x00, 0x04, 0x00, 0x69, 0x00),
9985 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
9986 },
9987 {
9988 .freq = 5850,
9989 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x02,
9990 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
9991 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
9992 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
9993 0x00, 0x04, 0x00, 0x69, 0x00),
9994 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
9995 },
9996 {
9997 .freq = 5860,
9998 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x02,
9999 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
10000 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
10001 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
10002 0x00, 0x04, 0x00, 0x69, 0x00),
10003 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
10004 },
10005 {
10006 .freq = 5870,
10007 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x02,
10008 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
10009 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
10010 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
10011 0x00, 0x04, 0x00, 0x68, 0x00),
10012 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
10013 },
10014 {
10015 .freq = 5880,
10016 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x02,
10017 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
10018 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
10019 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
10020 0x00, 0x04, 0x00, 0x68, 0x00),
10021 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
10022 },
10023 {
10024 .freq = 5890,
10025 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x02,
10026 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
10027 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
10028 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
10029 0x00, 0x04, 0x00, 0x68, 0x00),
10030 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
10031 },
10032 {
10033 .freq = 5900,
10034 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x02,
10035 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
10036 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
10037 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
10038 0x00, 0x04, 0x00, 0x68, 0x00),
10039 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
10040 },
10041 {
10042 .freq = 5910,
10043 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x02,
10044 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
10045 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
10046 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
10047 0x00, 0x04, 0x00, 0x68, 0x00),
10048 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
10049 },
10050 {
10051 .freq = 2412,
10052 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
10053 0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
10054 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
10055 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
10056 0x70, 0x00, 0x0b, 0x00, 0x0a),
10057 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
10058 },
10059 {
10060 .freq = 2417,
10061 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
10062 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
10063 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
10064 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
10065 0x70, 0x00, 0x0b, 0x00, 0x0a),
10066 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
10067 },
10068 {
10069 .freq = 2422,
10070 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
10071 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
10072 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
10073 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
10074 0x70, 0x00, 0x0b, 0x00, 0x0a),
10075 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
10076 },
10077 {
10078 .freq = 2427,
10079 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
10080 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
10081 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
10082 0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
10083 0x70, 0x00, 0x0a, 0x00, 0x0a),
10084 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
10085 },
10086 {
10087 .freq = 2432,
10088 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
10089 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
10090 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
10091 0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
10092 0x70, 0x00, 0x0a, 0x00, 0x0a),
10093 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
10094 },
10095 {
10096 .freq = 2437,
10097 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
10098 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
10099 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
10100 0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
10101 0x70, 0x00, 0x0a, 0x00, 0x0a),
10102 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
10103 },
10104 {
10105 .freq = 2442,
10106 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
10107 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
10108 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
10109 0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
10110 0x70, 0x00, 0x0a, 0x00, 0x0a),
10111 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
10112 },
10113 {
10114 .freq = 2447,
10115 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
10116 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
10117 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
10118 0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
10119 0x70, 0x00, 0x0a, 0x00, 0x09),
10120 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
10121 },
10122 {
10123 .freq = 2452,
10124 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
10125 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
10126 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
10127 0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
10128 0x70, 0x00, 0x0a, 0x00, 0x09),
10129 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
10130 },
10131 {
10132 .freq = 2457,
10133 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
10134 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
10135 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
10136 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
10137 0x70, 0x00, 0x0a, 0x00, 0x09),
10138 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
10139 },
10140 {
10141 .freq = 2462,
10142 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
10143 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
10144 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
10145 0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
10146 0x70, 0x00, 0x09, 0x00, 0x09),
10147 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
10148 },
10149 {
10150 .freq = 2467,
10151 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
10152 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
10153 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
10154 0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
10155 0x70, 0x00, 0x09, 0x00, 0x09),
10156 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
10157 },
10158 {
10159 .freq = 2472,
10160 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
10161 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
10162 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
10163 0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
10164 0x70, 0x00, 0x09, 0x00, 0x09),
10165 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
10166 },
10167 {
10168 .freq = 2484,
10169 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
10170 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
10171 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
10172 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
10173 0x70, 0x00, 0x09, 0x00, 0x09),
10174 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
10175 },
10176};
10177
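The table above is one long array of per-channel tuning presets: each entry couples a center frequency in MHz with the radio register values packed by RADIOREGS3() and the PHY register values packed by PHYREGS(). A minimal userspace sketch of the same designated-initializer table pattern; the struct below is a simplified stand-in, not the driver's b43_nphy_channeltab_entry_rev3:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's channel-table entry; the real
 * struct carries dozens of radio registers filled in by RADIOREGS3(). */
struct chantab_entry {
	uint16_t freq;         /* channel center frequency in MHz */
	uint16_t phy_regs[6];  /* stand-in for the PHYREGS() values */
};

static const struct chantab_entry chantab[] = {
	{ .freq = 2412, .phy_regs = { 0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443 } },
	{ .freq = 5745, .phy_regs = { 0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9 } },
};

int main(void)
{
	size_t i;

	/* The driver selects an entry by exact frequency match. */
	for (i = 0; i < sizeof(chantab) / sizeof(chantab[0]); i++)
		printf("%u MHz -> first PHY reg 0x%04x\n",
		       (unsigned)chantab[i].freq,
		       (unsigned)chantab[i].phy_regs[0]);
	return 0;
}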
10178static const struct b2056_inittabs_pts
10179*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
10180{
10181 struct b43_phy *phy = &dev->phy;
10182
10183 switch (dev->phy.rev) {
10184 case 3:
10185 return &b2056_inittab_phy_rev3;
10186 case 4:
10187 return &b2056_inittab_phy_rev4;
10188 default:
10189 switch (phy->radio_rev) {
10190 case 5:
10191 return &b2056_inittab_radio_rev5;
10192 case 6:
10193 return &b2056_inittab_radio_rev6;
10194 case 7:
10195 case 9:
10196 return &b2056_inittab_radio_rev7_9;
10197 case 8:
10198 return &b2056_inittab_radio_rev8;
10199 case 11:
10200 return &b2056_inittab_radio_rev11;
10201 }
10202 }
10203
10204 return NULL;
10205}
10206
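The helper above dispatches on the PHY revision first and only falls back to the radio revision for newer PHYs, returning NULL for unknown hardware so callers can WARN and abort the upload. A compilable sketch of that two-level dispatch; the table types and revision numbers here are illustrative, not the driver's:

#include <stdio.h>

struct inittabs { const char *name; };

static const struct inittabs tab_phy_rev3 = { "phy rev3 tables" };
static const struct inittabs tab_radio_rev5 = { "radio rev5 tables" };

/* Dispatch on PHY rev first; for newer PHYs, select by radio rev instead. */
static const struct inittabs *get_inittabs(int phy_rev, int radio_rev)
{
	switch (phy_rev) {
	case 3:
		return &tab_phy_rev3;
	default:
		switch (radio_rev) {
		case 5:
			return &tab_radio_rev5;
		}
	}
	return NULL;	/* unknown hardware: caller warns and bails out */
}

int main(void)
{
	const struct inittabs *t = get_inittabs(7, 5);

	printf("%s\n", t ? t->name : "unsupported");
	return 0;
}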
 static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
 				 bool ignore_uploadflag, u16 routing,
 				 const struct b2056_inittab_entry *e,
@@ -9037,11 +10230,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
 {
 	const struct b2056_inittabs_pts *pts;
 
-	if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+	pts = b43_nphy_get_inittabs_rev3(dev);
+	if (!pts) {
 		B43_WARN_ON(1);
 		return;
 	}
-	pts = &b2056_inittabs[dev->phy.rev];
 
 	b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
 			     B2056_SYN, pts->syn, pts->syn_length);
@@ -9060,11 +10253,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
 	const struct b2056_inittabs_pts *pts;
 	const struct b2056_inittab_entry *e;
 
-	if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+	pts = b43_nphy_get_inittabs_rev3(dev);
+	if (!pts) {
 		B43_WARN_ON(1);
 		return;
 	}
-	pts = &b2056_inittabs[dev->phy.rev];
+
 	e = &pts->syn[B2056_SYN_PLL_CP2];
 
 	b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@@ -9073,38 +10267,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
 const struct b43_nphy_channeltab_entry_rev3 *
 b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
 {
+	struct b43_phy *phy = &dev->phy;
 	const struct b43_nphy_channeltab_entry_rev3 *e;
 	unsigned int length, i;
 
-	switch (dev->phy.rev) {
+	switch (phy->rev) {
 	case 3:
-		e = b43_nphy_channeltab_rev3;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+		e = b43_nphy_channeltab_phy_rev3;
+		length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
 		break;
 	case 4:
-		e = b43_nphy_channeltab_rev4;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
-		break;
-	case 5:
-		e = b43_nphy_channeltab_rev5;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
-		break;
-	case 6:
-		e = b43_nphy_channeltab_rev6;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
-		break;
-	case 7:
-	case 9:
-		e = b43_nphy_channeltab_rev7_9;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
-		break;
-	case 8:
-		e = b43_nphy_channeltab_rev8;
-		length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+		e = b43_nphy_channeltab_phy_rev4;
+		length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
 		break;
 	default:
-		B43_WARN_ON(1);
-		return NULL;
+		switch (phy->radio_rev) {
+		case 5:
+			e = b43_nphy_channeltab_radio_rev5;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
+			break;
+		case 6:
+			e = b43_nphy_channeltab_radio_rev6;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
+			break;
+		case 7:
+		case 9:
+			e = b43_nphy_channeltab_radio_rev7_9;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
+			break;
+		case 8:
+			e = b43_nphy_channeltab_radio_rev8;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
+			break;
+		case 11:
+			e = b43_nphy_channeltab_radio_rev11;
+			length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
+			break;
+		default:
+			B43_WARN_ON(1);
+			return NULL;
+		}
 	}
 
 	for (i = 0; i < length; i++, e++) {
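The function ends with a linear scan over the selected table, matching the requested frequency against each entry's .freq field. A standalone sketch of that lookup loop, with a toy entry type standing in for the real one:

#include <stddef.h>
#include <stdio.h>

struct entry { unsigned short freq; unsigned short regval; };

static const struct entry tab[] = {
	{ 2412, 0x03c9 }, { 2437, 0x03d3 }, { 5745, 0x08fe },
};

/* Linear scan by exact center frequency, as in the driver's final loop;
 * NULL means the channel is not in the table. */
static const struct entry *get_chantabent(unsigned short freq)
{
	const struct entry *e = tab;
	size_t i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++, e++) {
		if (e->freq == freq)
			return e;
	}
	return NULL;
}

int main(void)
{
	const struct entry *e = get_chantabent(2437);

	if (e)
		printf("2437 MHz -> 0x%04x\n", (unsigned)e->regval);
	else
		puts("no such channel");
	return 0;
}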
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 94c755fdda14..4047c05e3807 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
 	0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
 };
 
-static const u32 b43_ntab_noisevar0_r3[] = {
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-};
-
-static const u32 b43_ntab_noisevar1_r3[] = {
+static const u32 b43_ntab_noisevar_r3[] = {
 	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
 	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
 	0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@@ -3109,31 +3042,32 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
 		antswlut = sprom->fem.ghz2.antswlut;
 
 	/* Static tables */
-	ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
-	ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
-	ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
-	ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
-	ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
-	ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
-	ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
-	ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
-	ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
-	ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
-	ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
-	ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
-	ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
-	ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
-	ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
-	ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
-	ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
-	ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
-	ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
-	ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
-	ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
-	ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
-	ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
-	ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
-	ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+	if (dev->phy.do_full_init) {
+		ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+		ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+		ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
+		ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+		ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+		ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
+		ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+		ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+		ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+		ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+		ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+		ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+		ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+		ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+		ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
+		ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
+		ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+		ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+		ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+		ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+		ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+		ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+		ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+		ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+	}
 
 	/* Volatile tables */
 	if (antswlut < ARRAY_SIZE(b43_ntab_antswctl_r3))
@@ -3146,20 +3080,22 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
 static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
 {
 	/* Static tables */
-	ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
-	ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
-	ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
-	ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
-	ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
-	ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
-	ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
-	ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
-	ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
-	ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
-	ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
-	ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
-	ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
-	ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+	if (dev->phy.do_full_init) {
+		ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
+		ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
+		ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
+		ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
+		ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
+		ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
+		ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
+		ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
+		ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
+		ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
+		ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
+		ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
+		ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
+		ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+	}
 
 	/* Volatile tables */
 	ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
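Both hunks above wrap the static-table uploads in a do_full_init guard: static tables keep their contents across a fast PHY reinit, so they only need to be written on the first full initialization, while volatile tables are rewritten every time. A minimal sketch of that guard pattern, with hypothetical table names:

#include <stdbool.h>
#include <stdio.h>

struct phy_state { bool do_full_init; };

static void upload(const char *table) { printf("upload %s\n", table); }

/* Static tables survive reinit, so skip them unless this is a full init;
 * volatile tables are rewritten unconditionally. */
static void tables_init(struct phy_state *phy)
{
	if (phy->do_full_init) {
		upload("framestruct");	/* static */
		upload("noisevar");	/* static */
	}
	upload("antswctl");		/* volatile: always uploaded */
}

int main(void)
{
	struct phy_state phy = { .do_full_init = true };

	tables_init(&phy);	/* first init: uploads everything */
	phy.do_full_init = false;
	tables_init(&phy);	/* fast reinit: only the volatile table */
	return 0;
}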
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9ff33adcff89..3a58aee4c4cf 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 #define B43_NTAB_TMAP_R3		B43_NTAB32(12, 0)	/* TM AP */
 #define B43_NTAB_INTLEVEL_R3		B43_NTAB32(13, 0)	/* INT LV */
 #define B43_NTAB_TDTRN_R3		B43_NTAB32(14, 0)	/* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3		B43_NTAB32(16, 0)	/* noise variance 0 */
-#define B43_NTAB_NOISEVAR1_R3		B43_NTAB32(16, 128)	/* noise variance 1 */
+#define B43_NTAB_NOISEVAR_R3		B43_NTAB32(16, 0)	/* noise variance */
 #define B43_NTAB_MCS_R3			B43_NTAB16(18, 0)	/* MCS */
 #define B43_NTAB_TDI20A0_R3		B43_NTAB32(19, 128)	/* TDI 20/0 */
 #define B43_NTAB_TDI20A1_R3		B43_NTAB32(19, 256)	/* TDI 20/1 */
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 9b1a038be08b..c218c08fb2f5 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
 
 static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
 {
-	b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480);
+	b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
 }
 
 static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 31adb8cf0291..4f38f19b8e3d 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -408,7 +408,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
 		mac_ctl |= B43_TXH_MAC_HWSEQ;
 	if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
 		mac_ctl |= B43_TXH_MAC_STMSDU;
-	if (phy->type == B43_PHYTYPE_A)
+	if (!phy->gmode)
 		mac_ctl |= B43_TXH_MAC_5GHZ;
 
 	/* Overwrite rates[0].count to make the retry calculation
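The xmit.c change keys the 5 GHz TX-header flag off !phy->gmode instead of the A-PHY type; in b43, gmode set means the device is operating in the 2.4 GHz band, so anything else is treated as 5 GHz. A tiny sketch of the flag computation; the bit value is illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TXH_MAC_5GHZ 0x0100	/* illustrative bit, not the driver's value */

/* gmode set means 2.4 GHz (802.11g) operation; otherwise mark 5 GHz. */
static uint32_t mac_ctl_flags(bool gmode)
{
	uint32_t mac_ctl = 0;

	if (!gmode)
		mac_ctl |= TXH_MAC_5GHZ;
	return mac_ctl;
}

int main(void)
{
	printf("2.4 GHz: 0x%04x, 5 GHz: 0x%04x\n",
	       mac_ctl_flags(true), mac_ctl_flags(false));
	return 0;
}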
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 1d2ceac3a221..98e67c18f276 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -33,7 +33,7 @@ brcmfmac-objs += \
 		bcdc.o \
 		dhd_common.o \
 		dhd_linux.o \
-		nvram.o \
+		firmware.o \
 		btcoex.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
 		dhd_sdio.o \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 939d6b132922..16f9ab2568a8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -186,7 +186,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
 			  enum brcmf_netif_stop_reason reason, bool state);
 u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
 		      bool success);
 
 /* Sets dongle media info (drv_version, mac address). */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index c4535616064e..7735328fff21 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -63,7 +63,6 @@ struct brcmf_bus_dcmd {
  */
 struct brcmf_bus_ops {
 	int (*preinit)(struct device *dev);
-	int (*init)(struct device *dev);
 	void (*stop)(struct device *dev);
 	int (*txdata)(struct device *dev, struct sk_buff *skb);
 	int (*txctl)(struct device *dev, unsigned char *msg, uint len);
@@ -99,6 +98,7 @@ struct brcmf_bus {
 	unsigned long tx_realloc;
 	u32 chip;
 	u32 chiprev;
+	bool always_use_fws_queue;
 
 	struct brcmf_bus_ops *ops;
 };
@@ -113,11 +113,6 @@ static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
 	return bus->ops->preinit(bus->dev);
 }
 
-static inline int brcmf_bus_init(struct brcmf_bus *bus)
-{
-	return bus->ops->init(bus->dev);
-}
-
 static inline void brcmf_bus_stop(struct brcmf_bus *bus)
 {
 	bus->ops->stop(bus->dev);
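brcmf_bus_ops is a small per-bus vtable, and the removed init callback shows the surrounding pattern: core code calls thin inline wrappers that forward to whatever implementation the bus driver registered. A self-contained sketch of that dispatch shape, with made-up names rather than the driver's:

#include <stdio.h>

struct bus;

/* Per-bus operations table, mirroring the brcmf_bus_ops idea. */
struct bus_ops {
	int (*preinit)(struct bus *bus);
	void (*stop)(struct bus *bus);
};

struct bus { const struct bus_ops *ops; };

/* Thin wrappers so core code never touches ops directly. */
static int bus_preinit(struct bus *bus) { return bus->ops->preinit(bus); }
static void bus_stop(struct bus *bus) { bus->ops->stop(bus); }

static int sdio_preinit(struct bus *bus) { (void)bus; puts("sdio preinit"); return 0; }
static void sdio_stop(struct bus *bus) { (void)bus; puts("sdio stop"); }

static const struct bus_ops sdio_ops = { sdio_preinit, sdio_stop };

int main(void)
{
	struct bus b = { &sdio_ops };

	bus_preinit(&b);
	bus_stop(&b);
	return 0;
}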
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 6a8983a1fb9c..ed3e32ce8c23 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,6 +32,9 @@
 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME	40
 #define BRCMF_DEFAULT_PACKET_FILTER	"100 0 0 0 0x01 0x00"
 
+/* boost value for RSSI_DELTA in preferred join selection */
+#define BRCMF_JOIN_PREF_RSSI_BOOST	8
+
 
 bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
 		      struct sk_buff *pkt, int prec)
@@ -246,6 +249,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 {
 	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
 	u8 buf[BRCMF_DCMD_SMLEN];
+	struct brcmf_join_pref_params join_pref_params[2];
 	char *ptr;
 	s32 err;
 
@@ -298,6 +302,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 		goto done;
 	}
 
+	/* Setup join_pref to select target by RSSI(with boost on 5GHz) */
+	join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+	join_pref_params[0].len = 2;
+	join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+	join_pref_params[0].band = WLC_BAND_5G;
+	join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+	join_pref_params[1].len = 2;
+	join_pref_params[1].rssi_gain = 0;
+	join_pref_params[1].band = 0;
+	err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+				       sizeof(join_pref_params));
+	if (err)
+		brcmf_err("Set join_pref error (%d)\n", err);
+
 	/* Setup event_msgs, enable E_IF */
 	err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
 				       BRCMF_EVENTING_MASK_LEN);
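The join_pref setup packs two typed records into an array and hands the whole buffer to the firmware as one iovar: first an RSSI delta that boosts 5 GHz candidates, then plain RSSI ordering as the fallback. A userspace sketch of filling such a parameter array; the struct layout and constants below are guesses shaped like the driver's brcmf_join_pref_params, not its actual definition:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout shaped like brcmf_join_pref_params; the real
 * struct lives in the firmware interface headers. */
struct join_pref_params {
	uint8_t type;
	uint8_t len;
	uint8_t rssi_gain;
	uint8_t band;
};

enum { PREF_RSSI_DELTA = 1, PREF_RSSI = 2 };	/* illustrative type codes */
#define BAND_5G		3			/* illustrative band code */
#define RSSI_BOOST	8			/* boost for 5 GHz candidates */

int main(void)
{
	struct join_pref_params prefs[2];

	/* Record 0: boost the RSSI of 5 GHz candidates before comparing. */
	prefs[0] = (struct join_pref_params){ PREF_RSSI_DELTA, 2, RSSI_BOOST, BAND_5G };
	/* Record 1: fall back to plain RSSI ordering across all bands. */
	prefs[1] = (struct join_pref_params){ PREF_RSSI, 2, 0, 0 };

	printf("iovar payload is %zu bytes\n", sizeof(prefs));
	return 0;
}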
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 7d28cd385092..09dd8c13d844 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -190,7 +190,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 	int ret;
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_pub *drvr = ifp->drvr;
-	struct ethhdr *eh;
+	struct ethhdr *eh = (struct ethhdr *)(skb->data);
 
 	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
 
@@ -236,6 +236,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 		goto done;
 	}
 
+	if (eh->h_proto == htons(ETH_P_PAE))
+		atomic_inc(&ifp->pend_8021x_cnt);
+
 	ret = brcmf_fws_process_skb(ifp, skb);
 
 done:
@@ -538,31 +541,26 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
 	brcmf_netif_rx(ifp, skb);
 }
 
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
 		      bool success)
 {
 	struct brcmf_if *ifp;
 	struct ethhdr *eh;
-	u8 ifidx;
 	u16 type;
-	int res;
-
-	res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
 
 	ifp = drvr->iflist[ifidx];
 	if (!ifp)
 		goto done;
 
-	if (res == 0) {
-		eh = (struct ethhdr *)(txp->data);
-		type = ntohs(eh->h_proto);
+	eh = (struct ethhdr *)(txp->data);
+	type = ntohs(eh->h_proto);
 
-		if (type == ETH_P_PAE) {
-			atomic_dec(&ifp->pend_8021x_cnt);
-			if (waitqueue_active(&ifp->pend_8021x_wait))
-				wake_up(&ifp->pend_8021x_wait);
-		}
+	if (type == ETH_P_PAE) {
+		atomic_dec(&ifp->pend_8021x_cnt);
+		if (waitqueue_active(&ifp->pend_8021x_wait))
+			wake_up(&ifp->pend_8021x_wait);
 	}
+
 	if (!success)
 		ifp->stats.tx_errors++;
 done:
@@ -573,13 +571,17 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
+	u8 ifidx;
 
 	/* await txstatus signal for firmware if active */
 	if (brcmf_fws_fc_active(drvr->fws)) {
 		if (!success)
 			brcmf_fws_bustxfail(drvr->fws, txp);
 	} else {
-		brcmf_txfinalize(drvr, txp, success);
+		if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
+			brcmu_pkt_buf_free_skb(txp);
+		else
+			brcmf_txfinalize(drvr, txp, ifidx, success);
 	}
 }
 
@@ -914,13 +916,6 @@ int brcmf_bus_start(struct device *dev)
 
 	brcmf_dbg(TRACE, "\n");
 
-	/* Bring up the bus */
-	ret = brcmf_bus_init(bus_if);
-	if (ret != 0) {
-		brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
-		return ret;
-	}
-
 	/* add primary networking interface */
 	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
 	if (IS_ERR(ifp))
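After this refactor, brcmf_txcomplete pulls the protocol header exactly once to recover the interface index, frees the packet if that fails, and otherwise passes the already-known ifidx down to brcmf_txfinalize. A control-flow sketch of that shape; the helper names below are stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pkt { uint8_t ifidx; bool bad_header; };

/* Stand-in for brcmf_proto_hdrpull(): recover the interface index. */
static int hdrpull(struct pkt *p, uint8_t *ifidx)
{
	if (p->bad_header)
		return -1;
	*ifidx = p->ifidx;
	return 0;
}

static void pkt_free(struct pkt *p) { (void)p; puts("freed bad packet"); }

static void txfinalize(uint8_t ifidx, bool ok)
{
	printf("finalize on if%u, %s\n", ifidx, ok ? "ok" : "tx error");
}

/* Mirror of the new brcmf_txcomplete() flow: pull the header once here,
 * then hand the recovered ifidx to the finalize step. */
static void txcomplete(struct pkt *p, bool success)
{
	uint8_t ifidx;

	if (hdrpull(p, &ifidx))
		pkt_free(p);
	else
		txfinalize(ifidx, success);
}

int main(void)
{
	struct pkt good = { .ifidx = 0 }, bad = { .bad_header = true };

	txcomplete(&good, true);
	txcomplete(&bad, false);
	return 0;
}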
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 13c89a0c4ba7..8fa0dbbbda72 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -42,7 +42,7 @@
 #include <soc.h>
 #include "sdio_host.h"
 #include "chip.h"
-#include "nvram.h"
+#include "firmware.h"
 
 #define DCMD_RESP_TIMEOUT  2000	/* In milli second */
 
@@ -632,43 +632,28 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
 	{ BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
 };
 
-
-static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
-						enum brcmf_firmware_type type)
+static const char *brcmf_sdio_get_fwname(struct brcmf_chip *ci,
+					 enum brcmf_firmware_type type)
 {
-	const struct firmware *fw;
-	const char *name;
-	int err, i;
+	int i;
 
 	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
-		if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
-		    brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+		if (brcmf_fwname_data[i].chipid == ci->chip &&
+		    brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) {
 			switch (type) {
 			case BRCMF_FIRMWARE_BIN:
-				name = brcmf_fwname_data[i].bin;
-				break;
+				return brcmf_fwname_data[i].bin;
 			case BRCMF_FIRMWARE_NVRAM:
-				name = brcmf_fwname_data[i].nv;
-				break;
+				return brcmf_fwname_data[i].nv;
 			default:
 				brcmf_err("invalid firmware type (%d)\n", type);
 				return NULL;
 			}
-			goto found;
 		}
 	}
 	brcmf_err("Unknown chipid %d [%d]\n",
-		  bus->ci->chip, bus->ci->chiprev);
+		  ci->chip, ci->chiprev);
 	return NULL;
-
-found:
-	err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
-	if ((err) || (!fw)) {
-		brcmf_err("fail to request firmware %s (%d)\n", name, err);
-		return NULL;
-	}
-
-	return fw;
 }
 
 static void pkt_align(struct sk_buff *p, int len, int align)
@@ -3278,20 +3263,13 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
 }
 
 static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
-				     const struct firmware *nv)
+				     void *vars, u32 varsz)
 {
-	void *vars;
-	u32 varsz;
 	int address;
 	int err;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	vars = brcmf_nvram_strip(nv, &varsz);
-
-	if (vars == NULL)
-		return -EINVAL;
-
 	address = bus->ci->ramsize - varsz + bus->ci->rambase;
 	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
 	if (err)
@@ -3300,15 +3278,14 @@ static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
 	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
 		err = -EIO;
 
-	brcmf_nvram_free(vars);
-
 	return err;
 }
 
-static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
+					const struct firmware *fw,
+					void *nvram, u32 nvlen)
 {
 	int bcmerror = -EFAULT;
-	const struct firmware *fw;
 	u32 rstvec;
 
 	sdio_claim_host(bus->sdiodev->func[1]);
@@ -3317,12 +3294,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
 	/* Keep arm in reset */
 	brcmf_chip_enter_download(bus->ci);
 
-	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
-	if (fw == NULL) {
-		bcmerror = -ENOENT;
-		goto err;
-	}
-
 	rstvec = get_unaligned_le32(fw->data);
 	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
 
@@ -3330,17 +3301,12 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
 	release_firmware(fw);
 	if (bcmerror) {
 		brcmf_err("dongle image file download failed\n");
+		brcmf_fw_nvram_free(nvram);
 		goto err;
 	}
 
-	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
-	if (fw == NULL) {
-		bcmerror = -ENOENT;
-		goto err;
-	}
-
-	bcmerror = brcmf_sdio_download_nvram(bus, fw);
-	release_firmware(fw);
+	bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
+	brcmf_fw_nvram_free(nvram);
 	if (bcmerror) {
 		brcmf_err("dongle nvram file download failed\n");
 		goto err;
@@ -3490,97 +3456,6 @@ done:
 	return err;
 }
 
-static int brcmf_sdio_bus_init(struct device *dev)
-{
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-	struct brcmf_sdio *bus = sdiodev->bus;
-	int err, ret = 0;
-	u8 saveclk;
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	/* try to download image and nvram to the dongle */
-	if (bus_if->state == BRCMF_BUS_DOWN) {
-		bus->alp_only = true;
-		err = brcmf_sdio_download_firmware(bus);
-		if (err)
-			return err;
-		bus->alp_only = false;
-	}
-
-	if (!bus->sdiodev->bus_if->drvr)
-		return 0;
-
-	/* Start the watchdog timer */
-	bus->sdcnt.tickcnt = 0;
-	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-
-	sdio_claim_host(bus->sdiodev->func[1]);
-
-	/* Make sure backplane clock is on, needed to generate F2 interrupt */
-	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
-	if (bus->clkstate != CLK_AVAIL)
-		goto exit;
-
-	/* Force clocks on backplane to be sure F2 interrupt propagates */
-	saveclk = brcmf_sdiod_regrb(bus->sdiodev,
-				    SBSDIO_FUNC1_CHIPCLKCSR, &err);
-	if (!err) {
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  (saveclk | SBSDIO_FORCE_HT), &err);
-	}
-	if (err) {
-		brcmf_err("Failed to force clock for F2: err %d\n", err);
-		goto exit;
-	}
-
-	/* Enable function 2 (frame transfers) */
-	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
-		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
-	err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
-
-
-	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
-
-	/* If F2 successfully enabled, set core and enable interrupts */
-	if (!err) {
-		/* Set up the interrupt mask and enable interrupts */
-		bus->hostintmask = HOSTINTMASK;
-		w_sdreg32(bus, bus->hostintmask,
-			  offsetof(struct sdpcmd_regs, hostintmask));
-
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
-	} else {
-		/* Disable F2 again */
-		sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
-		ret = -ENODEV;
-	}
-
-	if (brcmf_chip_sr_capable(bus->ci)) {
-		brcmf_sdio_sr_init(bus);
-	} else {
-		/* Restore previous clock setting */
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  saveclk, &err);
-	}
-
-	if (ret == 0) {
-		ret = brcmf_sdiod_intr_register(bus->sdiodev);
-		if (ret != 0)
-			brcmf_err("intr register failed:%d\n", ret);
-	}
-
-	/* If we didn't come up, turn off backplane clock */
-	if (ret != 0)
-		brcmf_sdio_clkctl(bus, CLK_NONE, false);
-
-exit:
-	sdio_release_host(bus->sdiodev->func[1]);
-
-	return ret;
-}
-
 void brcmf_sdio_isr(struct brcmf_sdio *bus)
 {
 	brcmf_dbg(TRACE, "Enter\n");
@@ -4020,13 +3895,114 @@ brcmf_sdio_watchdog(unsigned long data)
 static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
 	.stop = brcmf_sdio_bus_stop,
 	.preinit = brcmf_sdio_bus_preinit,
-	.init = brcmf_sdio_bus_init,
 	.txdata = brcmf_sdio_bus_txdata,
 	.txctl = brcmf_sdio_bus_txctl,
 	.rxctl = brcmf_sdio_bus_rxctl,
 	.gettxq = brcmf_sdio_bus_gettxq,
 };
 
+static void brcmf_sdio_firmware_callback(struct device *dev,
+					 const struct firmware *code,
+					 void *nvram, u32 nvram_len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	int err = 0;
+	u8 saveclk;
+
+	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+
+	/* try to download image and nvram to the dongle */
+	if (bus_if->state == BRCMF_BUS_DOWN) {
+		bus->alp_only = true;
+		err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
+		if (err)
+			goto fail;
+		bus->alp_only = false;
+	}
+
+	if (!bus_if->drvr)
+		return;
+
+	/* Start the watchdog timer */
+	bus->sdcnt.tickcnt = 0;
+	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
+
+	sdio_claim_host(sdiodev->func[1]);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+	if (bus->clkstate != CLK_AVAIL)
+		goto release;
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		brcmf_err("Failed to force clock for F2: err %d\n", err);
+		goto release;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
+	err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+
+
+	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (!err) {
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		w_sdreg32(bus, bus->hostintmask,
+			  offsetof(struct sdpcmd_regs, hostintmask));
+
+		brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+	} else {
+		/* Disable F2 again */
+		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+		goto release;
+	}
+
+	if (brcmf_chip_sr_capable(bus->ci)) {
+		brcmf_sdio_sr_init(bus);
+	} else {
+		/* Restore previous clock setting */
+		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  saveclk, &err);
+	}
+
+	if (err == 0) {
+		err = brcmf_sdiod_intr_register(sdiodev);
+		if (err != 0)
+			brcmf_err("intr register failed:%d\n", err);
+	}
+
+	/* If we didn't come up, turn off backplane clock */
+	if (err != 0)
+		brcmf_sdio_clkctl(bus, CLK_NONE, false);
+
+	sdio_release_host(sdiodev->func[1]);
+
+	err = brcmf_bus_start(dev);
+	if (err != 0) {
+		brcmf_err("dongle is not responding\n");
+		goto fail;
+	}
+	return;
+
+release:
+	sdio_release_host(sdiodev->func[1]);
+fail:
+	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
+	device_release_driver(dev);
+}
+
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 {
 	int ret;
@@ -4110,8 +4086,13 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 		goto fail;
 	}
 
+	/* Query the F2 block size, set roundup accordingly */
+	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+	bus->roundup = min(max_roundup, bus->blocksize);
+
 	/* Allocate buffers */
 	if (bus->sdiodev->bus_if->maxctl) {
+		bus->sdiodev->bus_if->maxctl += bus->roundup;
 		bus->rxblen =
 		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
 			    ALIGNMENT) + bus->head_align;
@@ -4139,10 +4120,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 	bus->idletime = BRCMF_IDLE_INTERVAL;
 	bus->idleclock = BRCMF_IDLE_ACTIVE;
 
-	/* Query the F2 block size, set roundup accordingly */
-	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
-	bus->roundup = min(max_roundup, bus->blocksize);
-
 	/* SR state */
 	bus->sleeping = false;
 	bus->sr_enabled = false;
@@ -4150,10 +4127,14 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4150 brcmf_sdio_debugfs_create(bus); 4127 brcmf_sdio_debugfs_create(bus);
4151 brcmf_dbg(INFO, "completed!!\n"); 4128 brcmf_dbg(INFO, "completed!!\n");
4152 4129
4153 /* if firmware path present try to download and bring up bus */ 4130 ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
4154 ret = brcmf_bus_start(bus->sdiodev->dev); 4131 brcmf_sdio_get_fwname(bus->ci,
4132 BRCMF_FIRMWARE_BIN),
4133 brcmf_sdio_get_fwname(bus->ci,
4134 BRCMF_FIRMWARE_NVRAM),
4135 brcmf_sdio_firmware_callback);
4155 if (ret != 0) { 4136 if (ret != 0) {
4156 brcmf_err("dongle is not responding\n"); 4137 brcmf_err("async firmware request failed: %d\n", ret);
4157 goto fail; 4138 goto fail;
4158 } 4139 }
4159 4140
@@ -4173,9 +4154,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4173 /* De-register interrupt handler */ 4154 /* De-register interrupt handler */
4174 brcmf_sdiod_intr_unregister(bus->sdiodev); 4155 brcmf_sdiod_intr_unregister(bus->sdiodev);
4175 4156
4176 if (bus->sdiodev->bus_if->drvr) { 4157 brcmf_detach(bus->sdiodev->dev);
4177 brcmf_detach(bus->sdiodev->dev);
4178 }
4179 4158
4180 cancel_work_sync(&bus->datawork); 4159 cancel_work_sync(&bus->datawork);
4181 if (bus->brcmf_wq) 4160 if (bus->brcmf_wq)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
new file mode 100644
index 000000000000..7b7d237c1ddb
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/device.h>
20#include <linux/firmware.h>
21
22#include "dhd_dbg.h"
23#include "firmware.h"
24
25enum nvram_parser_state {
26 IDLE,
27 KEY,
28 VALUE,
29 COMMENT,
30 END
31};
32
33/**
34 * struct nvram_parser - internal info for parser.
35 *
36 * @state: current parser state.
37 * @fwnv: input buffer being parsed.
38 * @nvram: output buffer with parse result.
39 * @nvram_len: length of parse result.
40 * @line: current line.
41 * @column: current column in line.
42 * @pos: byte offset in input buffer.
43 * @entry: start position of key,value entry.
44 */
45struct nvram_parser {
46 enum nvram_parser_state state;
47 const struct firmware *fwnv;
48 u8 *nvram;
49 u32 nvram_len;
50 u32 line;
51 u32 column;
52 u32 pos;
53 u32 entry;
54};
55
56static bool is_nvram_char(char c)
57{
58 /* comment marker excluded */
59 if (c == '#')
60 return false;
61
62 /* key and value may have any other readable character */
63 return (c > 0x20 && c < 0x7f);
64}
65
66static bool is_whitespace(char c)
67{
68 return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
69}
70
71static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
72{
73 char c;
74
75 c = nvp->fwnv->data[nvp->pos];
76 if (c == '\n')
77 return COMMENT;
78 if (is_whitespace(c))
79 goto proceed;
80 if (c == '#')
81 return COMMENT;
82 if (is_nvram_char(c)) {
83 nvp->entry = nvp->pos;
84 return KEY;
85 }
86 brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
87 nvp->line, nvp->column);
88proceed:
89 nvp->column++;
90 nvp->pos++;
91 return IDLE;
92}
93
94static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
95{
96 enum nvram_parser_state st = nvp->state;
97 char c;
98
99 c = nvp->fwnv->data[nvp->pos];
100 if (c == '=') {
101 st = VALUE;
102 } else if (!is_nvram_char(c)) {
103 brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
104 nvp->line, nvp->column);
105 return COMMENT;
106 }
107
108 nvp->column++;
109 nvp->pos++;
110 return st;
111}
112
113static enum nvram_parser_state
114brcmf_nvram_handle_value(struct nvram_parser *nvp)
115{
116 char c;
117 char *skv;
118 char *ekv;
119 u32 cplen;
120
121 c = nvp->fwnv->data[nvp->pos];
122 if (!is_nvram_char(c)) {
123 /* key,value pair complete */
124 ekv = (char *)&nvp->fwnv->data[nvp->pos];
125 skv = (char *)&nvp->fwnv->data[nvp->entry];
126 cplen = ekv - skv;
127 /* copy to output buffer */
128 memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
129 nvp->nvram_len += cplen;
130 nvp->nvram[nvp->nvram_len] = '\0';
131 nvp->nvram_len++;
132 return IDLE;
133 }
134 nvp->pos++;
135 nvp->column++;
136 return VALUE;
137}
138
139static enum nvram_parser_state
140brcmf_nvram_handle_comment(struct nvram_parser *nvp)
141{
142 char *eol, *sol;
143
144 sol = (char *)&nvp->fwnv->data[nvp->pos];
145 eol = strchr(sol, '\n');
146 if (eol == NULL)
147 return END;
148
149 /* consume the rest of the line and move to the next one */
150 nvp->line++;
151 nvp->column = 1;
152 nvp->pos += (eol - sol) + 1;
153 return IDLE;
154}
155
156static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
157{
158 /* final state */
159 return END;
160}
161
162static enum nvram_parser_state
163(*nv_parser_states[])(struct nvram_parser *nvp) = {
164 brcmf_nvram_handle_idle,
165 brcmf_nvram_handle_key,
166 brcmf_nvram_handle_value,
167 brcmf_nvram_handle_comment,
168 brcmf_nvram_handle_end
169};
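As a worked trace of the handler table above (illustrative input, not taken from the patch): feeding the parser the line "boardrev=0x1203\n" starts in IDLE, where 'b' passes is_nvram_char() so the entry offset is recorded and the state becomes KEY; KEY consumes characters until '=' flips it to VALUE; the terminating '\n' fails is_nvram_char() in VALUE, so the whole "key=value" span is copied to the output buffer with a NUL appended and the state returns to IDLE; that same '\n' then routes IDLE to COMMENT, whose handler skips past the newline, bumps the line counter, and resumes IDLE.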
170
171static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
172 const struct firmware *nv)
173{
174 memset(nvp, 0, sizeof(*nvp));
175 nvp->fwnv = nv;
176 /* Alloc for extra 0 byte + roundup by 4 + length field */
177 nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
178 if (!nvp->nvram)
179 return -ENOMEM;
180
181 nvp->line = 1;
182 nvp->column = 1;
183 return 0;
184}
185
186/* brcmf_fw_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
187 * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
188 * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
189 * End of buffer is completed with a token identifying the length of the buffer.
190 */
191static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
192{
193 struct nvram_parser nvp;
194 u32 pad;
195 u32 token;
196 __le32 token_le;
197
198 if (brcmf_init_nvram_parser(&nvp, nv) < 0)
199 return NULL;
200
201 while (nvp.pos < nv->size) {
202 nvp.state = nv_parser_states[nvp.state](&nvp);
203 if (nvp.state == END)
204 break;
205 }
206 pad = nvp.nvram_len;
207 *new_length = roundup(nvp.nvram_len + 1, 4);
208 while (pad != *new_length) {
209 nvp.nvram[pad] = 0;
210 pad++;
211 }
212
213 token = *new_length / 4;
214 token = (~token << 16) | (token & 0x0000FFFF);
215 token_le = cpu_to_le32(token);
216
217 memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
218 *new_length += sizeof(token_le);
219
220 return nvp.nvram;
221}
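The end-of-buffer token deserves a concrete number. Below is a minimal user-space sketch (not driver code) of the same arithmetic: the stripped, NUL-padded buffer length is expressed in 32-bit words, with the one's complement of the word count in the upper half and the count itself in the lower half, so a 32-byte result gives 8 words and a token of 0xFFF70008.

#include <stdint.h>

/* Sketch of the token appended by brcmf_fw_nvram_strip(); new_length
 * must already be rounded up to a multiple of 4, as done above.
 */
static uint32_t nvram_end_token(uint32_t new_length)
{
	uint32_t words = new_length / 4;

	/* e.g. new_length = 32 -> words = 8 -> 0xFFF70008 */
	return (~words << 16) | (words & 0x0000FFFF);
}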
222
223void brcmf_fw_nvram_free(void *nvram)
224{
225 kfree(nvram);
226}
227
228struct brcmf_fw {
229 struct device *dev;
230 u16 flags;
231 const struct firmware *code;
232 const char *nvram_name;
233 void (*done)(struct device *dev, const struct firmware *fw,
234 void *nvram_image, u32 nvram_len);
235};
236
237static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
238{
239 struct brcmf_fw *fwctx = ctx;
240 u32 nvram_length = 0;
241 void *nvram = NULL;
242
243 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
244 if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
245 goto fail;
246
247 if (fw) {
248 nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
249 release_firmware(fw);
250 if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
251 goto fail;
252 }
253
254 fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
255 kfree(fwctx);
256 return;
257
258fail:
259 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
260 if (fwctx->code)
261 release_firmware(fwctx->code);
262 device_release_driver(fwctx->dev);
263 kfree(fwctx);
264}
265
266static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
267{
268 struct brcmf_fw *fwctx = ctx;
269 int ret;
270
271 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
272 if (!fw)
273 goto fail;
274
275 /* only requested code so done here */
276 if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
277 fwctx->done(fwctx->dev, fw, NULL, 0);
278 kfree(fwctx);
279 return;
280 }
281 fwctx->code = fw;
282 ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
283 fwctx->dev, GFP_KERNEL, fwctx,
284 brcmf_fw_request_nvram_done);
285
286 if (!ret)
287 return;
288
289 /* when nvram is optional call .done() callback here */
290 if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
291 fwctx->done(fwctx->dev, fw, NULL, 0);
292 kfree(fwctx);
293 return;
294 }
295
296 /* failed nvram request */
297 release_firmware(fw);
298fail:
299 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
300 device_release_driver(fwctx->dev);
301 kfree(fwctx);
302}
303
304int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
305 const char *code, const char *nvram,
306 void (*fw_cb)(struct device *dev,
307 const struct firmware *fw,
308 void *nvram_image, u32 nvram_len))
309{
310 struct brcmf_fw *fwctx;
311
312 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
313 if (!fw_cb || !code)
314 return -EINVAL;
315
316 if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram)
317 return -EINVAL;
318
319 fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL);
320 if (!fwctx)
321 return -ENOMEM;
322
323 fwctx->dev = dev;
324 fwctx->flags = flags;
325 fwctx->done = fw_cb;
326 if (flags & BRCMF_FW_REQUEST_NVRAM)
327 fwctx->nvram_name = nvram;
328
329 return request_firmware_nowait(THIS_MODULE, true, code, dev,
330 GFP_KERNEL, fwctx,
331 brcmf_fw_request_code_done);
332}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index d454580928c9..6431bfd7afff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -13,12 +13,24 @@
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16#ifndef BRCMFMAC_NVRAM_H 16#ifndef BRCMFMAC_FIRMWARE_H
17#define BRCMFMAC_NVRAM_H 17#define BRCMFMAC_FIRMWARE_H
18 18
19#define BRCMF_FW_REQUEST 0x000F
20#define BRCMF_FW_REQUEST_NVRAM 0x0001
21#define BRCMF_FW_REQ_FLAGS 0x00F0
22#define BRCMF_FW_REQ_NV_OPTIONAL 0x0010
19 23
20void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length); 24void brcmf_fw_nvram_free(void *nvram);
21void brcmf_nvram_free(void *nvram); 25/*
22 26 * Request firmware(s) asynchronously. When the asynchronous request
27 * fails it will not use the callback, but call device_release_driver()
28 * instead which will call the driver .remove() callback.
29 */
30int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
31 const char *code, const char *nvram,
32 void (*fw_cb)(struct device *dev,
33 const struct firmware *fw,
34 void *nvram_image, u32 nvram_len));
23 35
24#endif /* BRCMFMAC_NVRAM_H */ 36#endif /* BRCMFMAC_FIRMWARE_H */
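To make the contract concrete, here is a minimal sketch of a bus driver using this helper. The firmware names and the mybus_* identifiers are invented for illustration (the real call sites are the SDIO and USB probe paths in this patch), and per the comment above a failed asynchronous request never reaches the callback but triggers device_release_driver() instead.

static void mybus_fw_callback(struct device *dev, const struct firmware *fw,
			      void *nvram_image, u32 nvram_len)
{
	/* Runs once both images are available: download fw and
	 * nvram_image to the dongle, release them, then bring up the bus.
	 */
}

static int mybus_probe(struct device *dev)
{
	return brcmf_fw_get_firmwares(dev, BRCMF_FW_REQUEST_NVRAM,
				      "brcm/mybus-fw.bin",
				      "brcm/mybus-fw.txt",
				      mybus_fw_callback);
}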
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 614e4888504f..2bc68a2137fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -53,6 +53,14 @@
53#define BRCMF_OBSS_COEX_OFF 0 53#define BRCMF_OBSS_COEX_OFF 0
54#define BRCMF_OBSS_COEX_ON 1 54#define BRCMF_OBSS_COEX_ON 1
55 55
56/* join preference types for join_pref iovar */
57enum brcmf_join_pref_types {
58 BRCMF_JOIN_PREF_RSSI = 1,
59 BRCMF_JOIN_PREF_WPA,
60 BRCMF_JOIN_PREF_BAND,
61 BRCMF_JOIN_PREF_RSSI_DELTA,
62};
63
56enum brcmf_fil_p2p_if_types { 64enum brcmf_fil_p2p_if_types {
57 BRCMF_FIL_P2P_IF_CLIENT, 65 BRCMF_FIL_P2P_IF_CLIENT,
58 BRCMF_FIL_P2P_IF_GO, 66 BRCMF_FIL_P2P_IF_GO,
@@ -282,6 +290,22 @@ struct brcmf_assoc_params_le {
282 __le16 chanspec_list[1]; 290 __le16 chanspec_list[1];
283}; 291};
284 292
293/**
294 * struct brcmf_join_pref_params - parameters for preferred join selection.
295 *
296 * @type: preference type (see enum brcmf_join_pref_types).
297 * @len: length of bytes following (currently always 2).
298 * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
299 * @band: band to which selection preference applies.
300 * This is used if @type is BAND or RSSI_DELTA.
301 */
302struct brcmf_join_pref_params {
303 u8 type;
304 u8 len;
305 u8 rssi_gain;
306 u8 band;
307};
308
285/* used for join with or without a specific bssid and channel list */ 309/* used for join with or without a specific bssid and channel list */
286struct brcmf_join_params { 310struct brcmf_join_params {
287 struct brcmf_ssid_le ssid_le; 311 struct brcmf_ssid_le ssid_le;
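A hedged sketch of how the new join preference types are meant to be filled; this hunk only adds the definitions, so the band constant and the iovar-set helper below are assumptions based on the rest of the driver, not part of this patch. Two entries are chained: one boosting 5 GHz results by a fixed RSSI delta, one falling back to plain RSSI ordering, written through the "join_pref" iovar.

static int mybus_set_join_pref(struct brcmf_if *ifp)
{
	struct brcmf_join_pref_params join_pref[2];

	join_pref[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
	join_pref[0].len = 2;
	join_pref[0].rssi_gain = 8;	/* treat 5 GHz as 8 dB stronger */
	join_pref[0].band = WLC_BAND_5G; /* assumed brcmu_wifi.h constant */
	join_pref[1].type = BRCMF_JOIN_PREF_RSSI;
	join_pref[1].len = 2;
	join_pref[1].rssi_gain = 0;
	join_pref[1].band = 0;

	/* assumed helper from fwil.h */
	return brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref,
					sizeof(join_pref));
}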
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index c3e7d76dbf35..699908de314a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -476,6 +476,7 @@ struct brcmf_fws_info {
476 bool bus_flow_blocked; 476 bool bus_flow_blocked;
477 bool creditmap_received; 477 bool creditmap_received;
478 u8 mode; 478 u8 mode;
479 bool avoid_queueing;
479}; 480};
480 481
481/* 482/*
@@ -1369,13 +1370,12 @@ done:
1369} 1370}
1370 1371
1371static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, 1372static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
1372 struct sk_buff *skb, u32 genbit, 1373 struct sk_buff *skb, u8 ifidx,
1373 u16 seq) 1374 u32 genbit, u16 seq)
1374{ 1375{
1375 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; 1376 struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
1376 u32 hslot; 1377 u32 hslot;
1377 int ret; 1378 int ret;
1378 u8 ifidx;
1379 1379
1380 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); 1380 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
1381 1381
@@ -1389,29 +1389,21 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
1389 1389
1390 entry->generation = genbit; 1390 entry->generation = genbit;
1391 1391
1392 ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); 1392 brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
1393 if (ret == 0) { 1393 brcmf_skbcb(skb)->htod_seq = seq;
1394 brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit); 1394 if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
1395 brcmf_skbcb(skb)->htod_seq = seq; 1395 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
1396 if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) { 1396 brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
1397 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1); 1397 } else {
1398 brcmf_skb_htod_seq_set_field(skb, FROMFW, 0); 1398 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
1399 } else {
1400 brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
1401 }
1402 ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
1403 skb);
1404 } 1399 }
1400 ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
1405 1401
1406 if (ret != 0) { 1402 if (ret != 0) {
1407 /* suppress q is full or hdrpull failed, drop this packet */ 1403 /* suppress q is full, drop this packet */
1408 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, 1404 brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
1409 true);
1410 } else { 1405 } else {
1411 /* 1406 /* Mark suppressed to avoid a double free during wlfc cleanup */
1412 * Mark suppressed to avoid a double free during
1413 * wlfc cleanup
1414 */
1415 brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot); 1407 brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
1416 } 1408 }
1417 1409
@@ -1428,6 +1420,7 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1428 struct sk_buff *skb; 1420 struct sk_buff *skb;
1429 struct brcmf_skbuff_cb *skcb; 1421 struct brcmf_skbuff_cb *skcb;
1430 struct brcmf_fws_mac_descriptor *entry = NULL; 1422 struct brcmf_fws_mac_descriptor *entry = NULL;
1423 u8 ifidx;
1431 1424
1432 brcmf_dbg(DATA, "flags %d\n", flags); 1425 brcmf_dbg(DATA, "flags %d\n", flags);
1433 1426
@@ -1476,12 +1469,15 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
1476 } 1469 }
1477 brcmf_fws_macdesc_return_req_credit(skb); 1470 brcmf_fws_macdesc_return_req_credit(skb);
1478 1471
1472 if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
1473 brcmu_pkt_buf_free_skb(skb);
1474 return -EINVAL;
1475 }
1479 if (!remove_from_hanger) 1476 if (!remove_from_hanger)
1480 ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit, 1477 ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
1481 seq); 1478 genbit, seq);
1482
1483 if (remove_from_hanger || ret) 1479 if (remove_from_hanger || ret)
1484 brcmf_txfinalize(fws->drvr, skb, true); 1480 brcmf_txfinalize(fws->drvr, skb, ifidx, true);
1485 1481
1486 return 0; 1482 return 0;
1487} 1483}
@@ -1868,7 +1864,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1868 struct ethhdr *eh = (struct ethhdr *)(skb->data); 1864 struct ethhdr *eh = (struct ethhdr *)(skb->data);
1869 int fifo = BRCMF_FWS_FIFO_BCMC; 1865 int fifo = BRCMF_FWS_FIFO_BCMC;
1870 bool multicast = is_multicast_ether_addr(eh->h_dest); 1866 bool multicast = is_multicast_ether_addr(eh->h_dest);
1871 bool pae = eh->h_proto == htons(ETH_P_PAE); 1867 int rc = 0;
1872 1868
1873 brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); 1869 brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
1874 /* determine the priority */ 1870 /* determine the priority */
@@ -1876,8 +1872,13 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1876 skb->priority = cfg80211_classify8021d(skb, NULL); 1872 skb->priority = cfg80211_classify8021d(skb, NULL);
1877 1873
1878 drvr->tx_multicast += !!multicast; 1874 drvr->tx_multicast += !!multicast;
1879 if (pae) 1875
1880 atomic_inc(&ifp->pend_8021x_cnt); 1876 if (fws->avoid_queueing) {
1877 rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
1878 if (rc < 0)
1879 brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
1880 return rc;
1881 }
1881 1882
1882 /* set control buffer information */ 1883 /* set control buffer information */
1883 skcb->if_flags = 0; 1884 skcb->if_flags = 0;
@@ -1899,15 +1900,12 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
1899 brcmf_fws_schedule_deq(fws); 1900 brcmf_fws_schedule_deq(fws);
1900 } else { 1901 } else {
1901 brcmf_err("drop skb: no hanger slot\n"); 1902 brcmf_err("drop skb: no hanger slot\n");
1902 if (pae) { 1903 brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
1903 atomic_dec(&ifp->pend_8021x_cnt); 1904 rc = -ENOMEM;
1904 if (waitqueue_active(&ifp->pend_8021x_wait))
1905 wake_up(&ifp->pend_8021x_wait);
1906 }
1907 brcmu_pkt_buf_free_skb(skb);
1908 } 1905 }
1909 brcmf_fws_unlock(fws); 1906 brcmf_fws_unlock(fws);
1910 return 0; 1907
1908 return rc;
1911} 1909}
1912 1910
1913void brcmf_fws_reset_interface(struct brcmf_if *ifp) 1911void brcmf_fws_reset_interface(struct brcmf_if *ifp)
@@ -1982,7 +1980,8 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
1982 ret = brcmf_proto_txdata(drvr, ifidx, 0, skb); 1980 ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
1983 brcmf_fws_lock(fws); 1981 brcmf_fws_lock(fws);
1984 if (ret < 0) 1982 if (ret < 0)
1985 brcmf_txfinalize(drvr, skb, false); 1983 brcmf_txfinalize(drvr, skb, ifidx,
1984 false);
1986 if (fws->bus_flow_blocked) 1985 if (fws->bus_flow_blocked)
1987 break; 1986 break;
1988 } 1987 }
@@ -2039,6 +2038,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
2039 fws->drvr = drvr; 2038 fws->drvr = drvr;
2040 fws->fcmode = fcmode; 2039 fws->fcmode = fcmode;
2041 2040
2041 if ((drvr->bus_if->always_use_fws_queue == false) &&
2042 (fcmode == BRCMF_FWS_FCMODE_NONE)) {
2043 fws->avoid_queueing = true;
2044 brcmf_dbg(INFO, "FWS queueing will be avoided\n");
2045 return 0;
2046 }
2047
2042 fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); 2048 fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
2043 if (fws->fws_wq == NULL) { 2049 if (fws->fws_wq == NULL) {
2044 brcmf_err("workqueue creation failed\n"); 2050 brcmf_err("workqueue creation failed\n");
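The net effect of the fwsignal changes: a bus that sets always_use_fws_queue (the USB probe later in this patch does) keeps the firmware-signalling queues even when fcmode is NONE, while any other bus running with fcmode NONE now flags avoid_queueing, so brcmf_fws_process_skb() hands packets straight to brcmf_proto_txdata() and finalizes them itself on error instead of going through the queueing machinery.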
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
deleted file mode 100644
index d5ef86db631b..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/firmware.h>
20
21#include "nvram.h"
22
23/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
24 * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
25 * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
26 * End of buffer is completed with token identifying length of buffer.
27 */
28void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
29{
30 u8 *nvram;
31 u32 i;
32 u32 len;
33 u32 column;
34 u8 val;
35 bool comment;
36 u32 token;
37 __le32 token_le;
38
39 /* Alloc for extra 0 byte + roundup by 4 + length field */
40 nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
41 if (!nvram)
42 return NULL;
43
44 len = 0;
45 column = 0;
46 comment = false;
47 for (i = 0; i < nv->size; i++) {
48 val = nv->data[i];
49 if (val == 0)
50 break;
51 if (val == '\r')
52 continue;
53 if (comment && (val != '\n'))
54 continue;
55 comment = false;
56 if (val == '#') {
57 comment = true;
58 continue;
59 }
60 if (val == '\n') {
61 if (column == 0)
62 continue;
63 nvram[len] = 0;
64 len++;
65 column = 0;
66 continue;
67 }
68 nvram[len] = val;
69 len++;
70 column++;
71 }
72 column = len;
73 *new_length = roundup(len + 1, 4);
74 while (column != *new_length) {
75 nvram[column] = 0;
76 column++;
77 }
78
79 token = *new_length / 4;
80 token = (~token << 16) | (token & 0x0000FFFF);
81 token_le = cpu_to_le32(token);
82
83 memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
84 *new_length += sizeof(token_le);
85
86 return nvram;
87}
88
89void brcmf_nvram_free(void *nvram)
90{
91 kfree(nvram);
92}
93
94
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 24f65cd53859..6db51a666f61 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -25,6 +25,7 @@
25#include <dhd_bus.h> 25#include <dhd_bus.h>
26#include <dhd_dbg.h> 26#include <dhd_dbg.h>
27 27
28#include "firmware.h"
28#include "usb_rdl.h" 29#include "usb_rdl.h"
29#include "usb.h" 30#include "usb.h"
30 31
@@ -61,12 +62,6 @@ struct brcmf_usb_image {
61 u8 *image; 62 u8 *image;
62 int image_len; 63 int image_len;
63}; 64};
64static struct list_head fw_image_list;
65
66struct intr_transfer_buf {
67 u32 notification;
68 u32 reserved;
69};
70 65
71struct brcmf_usbdev_info { 66struct brcmf_usbdev_info {
72 struct brcmf_usbdev bus_pub; /* MUST BE FIRST */ 67 struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
@@ -75,7 +70,7 @@ struct brcmf_usbdev_info {
75 struct list_head rx_postq; 70 struct list_head rx_postq;
76 struct list_head tx_freeq; 71 struct list_head tx_freeq;
77 struct list_head tx_postq; 72 struct list_head tx_postq;
78 uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2; 73 uint rx_pipe, tx_pipe, rx_pipe2;
79 74
80 int rx_low_watermark; 75 int rx_low_watermark;
81 int tx_low_watermark; 76 int tx_low_watermark;
@@ -87,7 +82,7 @@ struct brcmf_usbdev_info {
87 struct brcmf_usbreq *tx_reqs; 82 struct brcmf_usbreq *tx_reqs;
88 struct brcmf_usbreq *rx_reqs; 83 struct brcmf_usbreq *rx_reqs;
89 84
90 u8 *image; /* buffer for combine fw and nvram */ 85 const u8 *image; /* buffer for combine fw and nvram */
91 int image_len; 86 int image_len;
92 87
93 struct usb_device *usbdev; 88 struct usb_device *usbdev;
@@ -104,10 +99,6 @@ struct brcmf_usbdev_info {
104 ulong ctl_op; 99 ulong ctl_op;
105 100
106 struct urb *bulk_urb; /* used for FW download */ 101 struct urb *bulk_urb; /* used for FW download */
107 struct urb *intr_urb; /* URB for interrupt endpoint */
108 int intr_size; /* Size of interrupt message */
109 int interval; /* Interrupt polling interval */
110 struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
111}; 102};
112 103
113static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 104static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -531,39 +522,6 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
531 } 522 }
532} 523}
533 524
534static void
535brcmf_usb_intr_complete(struct urb *urb)
536{
537 struct brcmf_usbdev_info *devinfo =
538 (struct brcmf_usbdev_info *)urb->context;
539 int err;
540
541 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
542
543 if (devinfo == NULL)
544 return;
545
546 if (unlikely(urb->status)) {
547 if (urb->status == -ENOENT ||
548 urb->status == -ESHUTDOWN ||
549 urb->status == -ENODEV) {
550 brcmf_usb_state_change(devinfo,
551 BRCMFMAC_USB_STATE_DOWN);
552 }
553 }
554
555 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
556 brcmf_err("intr cb when DBUS down, ignoring\n");
557 return;
558 }
559
560 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
561 err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
562 if (err)
563 brcmf_err("usb_submit_urb, err=%d\n", err);
564 }
565}
566
567static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) 525static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
568{ 526{
569 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 527 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
@@ -619,7 +577,6 @@ static int brcmf_usb_up(struct device *dev)
619{ 577{
620 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 578 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
621 u16 ifnum; 579 u16 ifnum;
622 int ret;
623 580
624 brcmf_dbg(USB, "Enter\n"); 581 brcmf_dbg(USB, "Enter\n");
625 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) 582 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
@@ -628,23 +585,6 @@ static int brcmf_usb_up(struct device *dev)
628 /* Success, indicate devinfo is fully up */ 585 /* Success, indicate devinfo is fully up */
629 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP); 586 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
630 587
631 if (devinfo->intr_urb) {
632 usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
633 devinfo->intr_pipe,
634 &devinfo->intr,
635 devinfo->intr_size,
636 (usb_complete_t)brcmf_usb_intr_complete,
637 devinfo,
638 devinfo->interval);
639
640 ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
641 if (ret) {
642 brcmf_err("USB_SUBMIT_URB failed with status %d\n",
643 ret);
644 return -EINVAL;
645 }
646 }
647
648 if (devinfo->ctl_urb) { 588 if (devinfo->ctl_urb) {
649 devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0); 589 devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
650 devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0); 590 devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
@@ -681,8 +621,6 @@ static void brcmf_usb_down(struct device *dev)
681 return; 621 return;
682 622
683 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN); 623 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
684 if (devinfo->intr_urb)
685 usb_kill_urb(devinfo->intr_urb);
686 624
687 if (devinfo->ctl_urb) 625 if (devinfo->ctl_urb)
688 usb_kill_urb(devinfo->ctl_urb); 626 usb_kill_urb(devinfo->ctl_urb);
@@ -1021,7 +959,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
1021 } 959 }
1022 960
1023 err = brcmf_usb_dlstart(devinfo, 961 err = brcmf_usb_dlstart(devinfo,
1024 devinfo->image, devinfo->image_len); 962 (u8 *)devinfo->image, devinfo->image_len);
1025 if (err == 0) 963 if (err == 0)
1026 err = brcmf_usb_dlrun(devinfo); 964 err = brcmf_usb_dlrun(devinfo);
1027 return err; 965 return err;
@@ -1036,7 +974,6 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
1036 brcmf_usb_free_q(&devinfo->rx_freeq, false); 974 brcmf_usb_free_q(&devinfo->rx_freeq, false);
1037 brcmf_usb_free_q(&devinfo->tx_freeq, false); 975 brcmf_usb_free_q(&devinfo->tx_freeq, false);
1038 976
1039 usb_free_urb(devinfo->intr_urb);
1040 usb_free_urb(devinfo->ctl_urb); 977 usb_free_urb(devinfo->ctl_urb);
1041 usb_free_urb(devinfo->bulk_urb); 978 usb_free_urb(devinfo->bulk_urb);
1042 979
@@ -1080,68 +1017,20 @@ static int check_file(const u8 *headers)
1080 return -1; 1017 return -1;
1081} 1018}
1082 1019
1083static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo) 1020static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
1084{ 1021{
1085 s8 *fwname;
1086 const struct firmware *fw;
1087 struct brcmf_usb_image *fw_image;
1088 int err;
1089
1090 brcmf_dbg(USB, "Enter\n");
1091 switch (devinfo->bus_pub.devid) { 1022 switch (devinfo->bus_pub.devid) {
1092 case 43143: 1023 case 43143:
1093 fwname = BRCMF_USB_43143_FW_NAME; 1024 return BRCMF_USB_43143_FW_NAME;
1094 break;
1095 case 43235: 1025 case 43235:
1096 case 43236: 1026 case 43236:
1097 case 43238: 1027 case 43238:
1098 fwname = BRCMF_USB_43236_FW_NAME; 1028 return BRCMF_USB_43236_FW_NAME;
1099 break;
1100 case 43242: 1029 case 43242:
1101 fwname = BRCMF_USB_43242_FW_NAME; 1030 return BRCMF_USB_43242_FW_NAME;
1102 break;
1103 default: 1031 default:
1104 return -EINVAL; 1032 return NULL;
1105 break;
1106 }
1107 brcmf_dbg(USB, "Loading FW %s\n", fwname);
1108 list_for_each_entry(fw_image, &fw_image_list, list) {
1109 if (fw_image->fwname == fwname) {
1110 devinfo->image = fw_image->image;
1111 devinfo->image_len = fw_image->image_len;
1112 return 0;
1113 }
1114 }
1115 /* fw image not yet loaded. Load it now and add to list */
1116 err = request_firmware(&fw, fwname, devinfo->dev);
1117 if (!fw) {
1118 brcmf_err("fail to request firmware %s\n", fwname);
1119 return err;
1120 }
1121 if (check_file(fw->data) < 0) {
1122 brcmf_err("invalid firmware %s\n", fwname);
1123 return -EINVAL;
1124 } 1033 }
1125
1126 fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
1127 if (!fw_image)
1128 return -ENOMEM;
1129 INIT_LIST_HEAD(&fw_image->list);
1130 list_add_tail(&fw_image->list, &fw_image_list);
1131 fw_image->fwname = fwname;
1132 fw_image->image = vmalloc(fw->size);
1133 if (!fw_image->image)
1134 return -ENOMEM;
1135
1136 memcpy(fw_image->image, fw->data, fw->size);
1137 fw_image->image_len = fw->size;
1138
1139 release_firmware(fw);
1140
1141 devinfo->image = fw_image->image;
1142 devinfo->image_len = fw_image->image_len;
1143
1144 return 0;
1145} 1034}
1146 1035
1147 1036
@@ -1186,11 +1075,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1186 goto error; 1075 goto error;
1187 devinfo->tx_freecount = ntxq; 1076 devinfo->tx_freecount = ntxq;
1188 1077
1189 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
1190 if (!devinfo->intr_urb) {
1191 brcmf_err("usb_alloc_urb (intr) failed\n");
1192 goto error;
1193 }
1194 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC); 1078 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
1195 if (!devinfo->ctl_urb) { 1079 if (!devinfo->ctl_urb) {
1196 brcmf_err("usb_alloc_urb (ctl) failed\n"); 1080 brcmf_err("usb_alloc_urb (ctl) failed\n");
@@ -1202,16 +1086,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1202 goto error; 1086 goto error;
1203 } 1087 }
1204 1088
1205 if (!brcmf_usb_dlneeded(devinfo))
1206 return &devinfo->bus_pub;
1207
1208 brcmf_dbg(USB, "Start fw downloading\n");
1209 if (brcmf_usb_get_fw(devinfo))
1210 goto error;
1211
1212 if (brcmf_usb_fw_download(devinfo))
1213 goto error;
1214
1215 return &devinfo->bus_pub; 1089 return &devinfo->bus_pub;
1216 1090
1217error: 1091error:
@@ -1222,18 +1096,77 @@ error:
1222 1096
1223static struct brcmf_bus_ops brcmf_usb_bus_ops = { 1097static struct brcmf_bus_ops brcmf_usb_bus_ops = {
1224 .txdata = brcmf_usb_tx, 1098 .txdata = brcmf_usb_tx,
1225 .init = brcmf_usb_up,
1226 .stop = brcmf_usb_down, 1099 .stop = brcmf_usb_down,
1227 .txctl = brcmf_usb_tx_ctlpkt, 1100 .txctl = brcmf_usb_tx_ctlpkt,
1228 .rxctl = brcmf_usb_rx_ctlpkt, 1101 .rxctl = brcmf_usb_rx_ctlpkt,
1229}; 1102};
1230 1103
1104static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
1105{
1106 int ret;
1107
1108 /* Attach to the common driver interface */
1109 ret = brcmf_attach(devinfo->dev);
1110 if (ret) {
1111 brcmf_err("brcmf_attach failed\n");
1112 return ret;
1113 }
1114
1115 ret = brcmf_usb_up(devinfo->dev);
1116 if (ret)
1117 goto fail;
1118
1119 ret = brcmf_bus_start(devinfo->dev);
1120 if (ret)
1121 goto fail;
1122
1123 return 0;
1124fail:
1125 brcmf_detach(devinfo->dev);
1126 return ret;
1127}
1128
1129static void brcmf_usb_probe_phase2(struct device *dev,
1130 const struct firmware *fw,
1131 void *nvram, u32 nvlen)
1132{
1133 struct brcmf_bus *bus = dev_get_drvdata(dev);
1134 struct brcmf_usbdev_info *devinfo;
1135 int ret;
1136
1137 brcmf_dbg(USB, "Start fw downloading\n");
1138 ret = check_file(fw->data);
1139 if (ret < 0) {
1140 brcmf_err("invalid firmware\n");
1141 release_firmware(fw);
1142 goto error;
1143 }
1144
1145 devinfo = bus->bus_priv.usb->devinfo;
1146 devinfo->image = fw->data;
1147 devinfo->image_len = fw->size;
1148
1149 ret = brcmf_usb_fw_download(devinfo);
1150 release_firmware(fw);
1151 if (ret)
1152 goto error;
1153
1154 ret = brcmf_usb_bus_setup(devinfo);
1155 if (ret)
1156 goto error;
1157
1158 return;
1159error:
1160 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
1161 device_release_driver(dev);
1162}
1163
1231static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) 1164static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1232{ 1165{
1233 struct brcmf_bus *bus = NULL; 1166 struct brcmf_bus *bus = NULL;
1234 struct brcmf_usbdev *bus_pub = NULL; 1167 struct brcmf_usbdev *bus_pub = NULL;
1235 int ret;
1236 struct device *dev = devinfo->dev; 1168 struct device *dev = devinfo->dev;
1169 int ret;
1237 1170
1238 brcmf_dbg(USB, "Enter\n"); 1171 brcmf_dbg(USB, "Enter\n");
1239 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ); 1172 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
@@ -1254,22 +1187,18 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1254 bus->chip = bus_pub->devid; 1187 bus->chip = bus_pub->devid;
1255 bus->chiprev = bus_pub->chiprev; 1188 bus->chiprev = bus_pub->chiprev;
1256 bus->proto_type = BRCMF_PROTO_BCDC; 1189 bus->proto_type = BRCMF_PROTO_BCDC;
1190 bus->always_use_fws_queue = true;
1257 1191
1258 /* Attach to the common driver interface */ 1192 if (!brcmf_usb_dlneeded(devinfo)) {
1259 ret = brcmf_attach(dev); 1193 ret = brcmf_usb_bus_setup(devinfo);
1260 if (ret) { 1194 if (ret)
1261 brcmf_err("brcmf_attach failed\n"); 1195 goto fail;
1262 goto fail;
1263 }
1264
1265 ret = brcmf_bus_start(dev);
1266 if (ret) {
1267 brcmf_err("dongle is not responding\n");
1268 brcmf_detach(dev);
1269 goto fail;
1270 } 1196 }
1271 1197 /* request firmware here */
1198 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
1199 brcmf_usb_probe_phase2);
1272 return 0; 1200 return 0;
1201
1273fail: 1202fail:
1274 /* Release resources in reverse order */ 1203 /* Release resources in reverse order */
1275 kfree(bus); 1204 kfree(bus);
@@ -1357,9 +1286,6 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1357 goto fail; 1286 goto fail;
1358 } 1287 }
1359 1288
1360 endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1361 devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
1362
1363 devinfo->rx_pipe = 0; 1289 devinfo->rx_pipe = 0;
1364 devinfo->rx_pipe2 = 0; 1290 devinfo->rx_pipe2 = 0;
1365 devinfo->tx_pipe = 0; 1291 devinfo->tx_pipe = 0;
@@ -1391,16 +1317,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1391 } 1317 }
1392 } 1318 }
1393 1319
1394 /* Allocate interrupt URB and data buffer */ 1320 if (usb->speed == USB_SPEED_SUPER)
1395 /* RNDIS says 8-byte intr, our old drivers used 4-byte */ 1321 brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
1396 if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16)) 1322 else if (usb->speed == USB_SPEED_HIGH)
1397 devinfo->intr_size = 8;
1398 else
1399 devinfo->intr_size = 4;
1400
1401 devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
1402
1403 if (usb->speed == USB_SPEED_HIGH)
1404 brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n"); 1323 brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
1405 else 1324 else
1406 brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n"); 1325 brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
@@ -1455,23 +1374,18 @@ static int brcmf_usb_resume(struct usb_interface *intf)
1455 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1374 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1456 1375
1457 brcmf_dbg(USB, "Enter\n"); 1376 brcmf_dbg(USB, "Enter\n");
1458 if (!brcmf_attach(devinfo->dev)) 1377 return brcmf_usb_bus_setup(devinfo);
1459 return brcmf_bus_start(&usb->dev);
1460
1461 return 0;
1462} 1378}
1463 1379
1464static int brcmf_usb_reset_resume(struct usb_interface *intf) 1380static int brcmf_usb_reset_resume(struct usb_interface *intf)
1465{ 1381{
1466 struct usb_device *usb = interface_to_usbdev(intf); 1382 struct usb_device *usb = interface_to_usbdev(intf);
1467 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1383 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1468
1469 brcmf_dbg(USB, "Enter\n"); 1384 brcmf_dbg(USB, "Enter\n");
1470 1385
1471 if (!brcmf_usb_fw_download(devinfo)) 1386 return brcmf_fw_get_firmwares(&usb->dev, 0,
1472 return brcmf_usb_resume(intf); 1387 brcmf_usb_get_fwname(devinfo), NULL,
1473 1388 brcmf_usb_probe_phase2);
1474 return -EIO;
1475} 1389}
1476 1390
1477#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c 1391#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
@@ -1506,16 +1420,6 @@ static struct usb_driver brcmf_usbdrvr = {
1506 .disable_hub_initiated_lpm = 1, 1420 .disable_hub_initiated_lpm = 1,
1507}; 1421};
1508 1422
1509static void brcmf_release_fw(struct list_head *q)
1510{
1511 struct brcmf_usb_image *fw_image, *next;
1512
1513 list_for_each_entry_safe(fw_image, next, q, list) {
1514 vfree(fw_image->image);
1515 list_del_init(&fw_image->list);
1516 }
1517}
1518
1519static int brcmf_usb_reset_device(struct device *dev, void *notused) 1423static int brcmf_usb_reset_device(struct device *dev, void *notused)
1520{ 1424{
1521 /* device passed is the usb interface so we 1425
@@ -1534,12 +1438,10 @@ void brcmf_usb_exit(void)
1534 ret = driver_for_each_device(drv, NULL, NULL, 1438 ret = driver_for_each_device(drv, NULL, NULL,
1535 brcmf_usb_reset_device); 1439 brcmf_usb_reset_device);
1536 usb_deregister(&brcmf_usbdrvr); 1440 usb_deregister(&brcmf_usbdrvr);
1537 brcmf_release_fw(&fw_image_list);
1538} 1441}
1539 1442
1540void brcmf_usb_register(void) 1443void brcmf_usb_register(void)
1541{ 1444{
1542 brcmf_dbg(USB, "Enter\n"); 1445 brcmf_dbg(USB, "Enter\n");
1543 INIT_LIST_HEAD(&fw_image_list);
1544 usb_register(&brcmf_usbdrvr); 1446 usb_register(&brcmf_usbdrvr);
1545} 1447}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index be1985296bdc..d8fa276e368b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -221,9 +221,9 @@ static const struct ieee80211_regdomain brcmf_regdom = {
221 */ 221 */
222 REG_RULE(2484-10, 2484+10, 20, 6, 20, 0), 222 REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
223 /* IEEE 802.11a, channel 36..64 */ 223 /* IEEE 802.11a, channel 36..64 */
224 REG_RULE(5150-10, 5350+10, 40, 6, 20, 0), 224 REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
225 /* IEEE 802.11a, channel 100..165 */ 225 /* IEEE 802.11a, channel 100..165 */
226 REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), } 226 REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
227}; 227};
228 228
229static const u32 __wl_cipher_suites[] = { 229static const u32 __wl_cipher_suites[] = {
@@ -341,6 +341,60 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
341 return qdbm; 341 return qdbm;
342} 342}
343 343
344static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
345 struct cfg80211_chan_def *ch)
346{
347 struct brcmu_chan ch_inf;
348 s32 primary_offset;
349
350 brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
351 ch->chan->center_freq, ch->center_freq1, ch->width);
352 ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
353 primary_offset = ch->center_freq1 - ch->chan->center_freq;
354 switch (ch->width) {
355 case NL80211_CHAN_WIDTH_20:
356 ch_inf.bw = BRCMU_CHAN_BW_20;
357 WARN_ON(primary_offset != 0);
358 break;
359 case NL80211_CHAN_WIDTH_40:
360 ch_inf.bw = BRCMU_CHAN_BW_40;
361 if (primary_offset < 0)
362 ch_inf.sb = BRCMU_CHAN_SB_U;
363 else
364 ch_inf.sb = BRCMU_CHAN_SB_L;
365 break;
366 case NL80211_CHAN_WIDTH_80:
367 ch_inf.bw = BRCMU_CHAN_BW_80;
368 if (primary_offset < 0) {
369 if (primary_offset < -CH_10MHZ_APART)
370 ch_inf.sb = BRCMU_CHAN_SB_UU;
371 else
372 ch_inf.sb = BRCMU_CHAN_SB_UL;
373 } else {
374 if (primary_offset > CH_10MHZ_APART)
375 ch_inf.sb = BRCMU_CHAN_SB_LL;
376 else
377 ch_inf.sb = BRCMU_CHAN_SB_LU;
378 }
379 break;
380 default:
381 WARN_ON_ONCE(1);
382 }
383 switch (ch->chan->band) {
384 case IEEE80211_BAND_2GHZ:
385 ch_inf.band = BRCMU_CHAN_BAND_2G;
386 break;
387 case IEEE80211_BAND_5GHZ:
388 ch_inf.band = BRCMU_CHAN_BAND_5G;
389 break;
390 default:
391 WARN_ON_ONCE(1);
392 }
393 d11inf->encchspec(&ch_inf);
394
395 return ch_inf.chspec;
396}
397
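A worked example for the 80 MHz branch above: a chandef with control channel 36 (5180 MHz) and center_freq1 = 5210 MHz gives ch_inf.chnum = 42 and primary_offset = +30; since +30 exceeds CH_10MHZ_APART, the control channel is taken as the lowest 20 MHz sub-band, so ch_inf.sb becomes BRCMU_CHAN_SB_LL and ch_inf.bw stays BRCMU_CHAN_BW_80 before encchspec() packs the chanspec.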
344u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, 398u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
345 struct ieee80211_channel *ch) 399 struct ieee80211_channel *ch)
346{ 400{
@@ -586,6 +640,9 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
586 if (err) 640 if (err)
587 brcmf_err("Scan abort failed\n"); 641 brcmf_err("Scan abort failed\n");
588 } 642 }
643
644 brcmf_set_mpc(ifp, 1);
645
589 /* 646 /*
590 * e-scan can be initiated by scheduled scan 647 * e-scan can be initiated by scheduled scan
591 * which takes precedence. 648 * which takes precedence.
@@ -595,12 +652,10 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
595 cfg->sched_escan = false; 652 cfg->sched_escan = false;
596 if (!aborted) 653 if (!aborted)
597 cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); 654 cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
598 brcmf_set_mpc(ifp, 1);
599 } else if (scan_request) { 655 } else if (scan_request) {
600 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", 656 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
601 aborted ? "Aborted" : "Done"); 657 aborted ? "Aborted" : "Done");
602 cfg80211_scan_done(scan_request, aborted); 658 cfg80211_scan_done(scan_request, aborted);
603 brcmf_set_mpc(ifp, 1);
604 } 659 }
605 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) 660 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
606 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n"); 661 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
@@ -1236,8 +1291,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1236 params->chandef.chan->center_freq); 1291 params->chandef.chan->center_freq);
1237 if (params->channel_fixed) { 1292 if (params->channel_fixed) {
1238 /* adding chanspec */ 1293 /* adding chanspec */
1239 chanspec = channel_to_chanspec(&cfg->d11inf, 1294 chanspec = chandef_to_chanspec(&cfg->d11inf,
1240 params->chandef.chan); 1295 &params->chandef);
1241 join_params.params_le.chanspec_list[0] = 1296 join_params.params_le.chanspec_list[0] =
1242 cpu_to_le16(chanspec); 1297 cpu_to_le16(chanspec);
1243 join_params.params_le.chanspec_num = cpu_to_le32(1); 1298 join_params.params_le.chanspec_num = cpu_to_le32(1);
@@ -2182,7 +2237,7 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
2182 2237
2183static s32 2238static s32
2184brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, 2239brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2185 u8 *mac, struct station_info *sinfo) 2240 const u8 *mac, struct station_info *sinfo)
2186{ 2241{
2187 struct brcmf_if *ifp = netdev_priv(ndev); 2242 struct brcmf_if *ifp = netdev_priv(ndev);
2188 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; 2243 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
@@ -3124,7 +3179,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3124 } 3179 }
3125 3180
3126 if (!request->n_ssids || !request->n_match_sets) { 3181 if (!request->n_ssids || !request->n_match_sets) {
3127 brcmf_err("Invalid sched scan req!! n_ssids:%d\n", 3182 brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
3128 request->n_ssids); 3183 request->n_ssids);
3129 return -EINVAL; 3184 return -EINVAL;
3130 } 3185 }
@@ -3734,23 +3789,6 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
3734} 3789}
3735 3790
3736static s32 3791static s32
3737brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
3738 struct brcmf_if *ifp,
3739 struct ieee80211_channel *channel)
3740{
3741 u16 chanspec;
3742 s32 err;
3743
3744 brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
3745 channel->center_freq);
3746
3747 chanspec = channel_to_chanspec(&cfg->d11inf, channel);
3748 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
3749
3750 return err;
3751}
3752
3753static s32
3754brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, 3792brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3755 struct cfg80211_ap_settings *settings) 3793 struct cfg80211_ap_settings *settings)
3756{ 3794{
@@ -3765,11 +3803,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3765 struct brcmf_join_params join_params; 3803 struct brcmf_join_params join_params;
3766 enum nl80211_iftype dev_role; 3804 enum nl80211_iftype dev_role;
3767 struct brcmf_fil_bss_enable_le bss_enable; 3805 struct brcmf_fil_bss_enable_le bss_enable;
3806 u16 chanspec;
3768 3807
3769 brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n", 3808 brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
3770 cfg80211_get_chandef_type(&settings->chandef), 3809 settings->chandef.chan->hw_value,
3771 settings->beacon_interval, 3810 settings->chandef.center_freq1, settings->chandef.width,
3772 settings->dtim_period); 3811 settings->beacon_interval, settings->dtim_period);
3773 brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n", 3812 brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
3774 settings->ssid, settings->ssid_len, settings->auth_type, 3813 settings->ssid, settings->ssid_len, settings->auth_type,
3775 settings->inactivity_timeout); 3814 settings->inactivity_timeout);
@@ -3826,9 +3865,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3826 3865
3827 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); 3866 brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
3828 3867
3829 err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan); 3868 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
3869 err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
3830 if (err < 0) { 3870 if (err < 0) {
3831 brcmf_err("Set Channel failed, %d\n", err); 3871 brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
3832 goto exit; 3872 goto exit;
3833 } 3873 }
3834 3874
@@ -3975,7 +4015,7 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
3975 4015
3976static int 4016static int
3977brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, 4017brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
3978 u8 *mac) 4018 const u8 *mac)
3979{ 4019{
3980 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 4020 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3981 struct brcmf_scb_val_le scbval; 4021 struct brcmf_scb_val_le scbval;
@@ -4203,7 +4243,7 @@ static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
4203} 4243}
4204 4244
4205static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, 4245static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
4206 struct net_device *ndev, u8 *peer, 4246 struct net_device *ndev, const u8 *peer,
4207 enum nl80211_tdls_operation oper) 4247 enum nl80211_tdls_operation oper)
4208{ 4248{
4209 struct brcmf_if *ifp; 4249 struct brcmf_if *ifp;
@@ -4364,6 +4404,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
4364 WIPHY_FLAG_OFFCHAN_TX | 4404 WIPHY_FLAG_OFFCHAN_TX |
4365 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | 4405 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
4366 WIPHY_FLAG_SUPPORTS_TDLS; 4406 WIPHY_FLAG_SUPPORTS_TDLS;
4407 if (!brcmf_roamoff)
4408 wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
4367 wiphy->mgmt_stypes = brcmf_txrx_stypes; 4409 wiphy->mgmt_stypes = brcmf_txrx_stypes;
4368 wiphy->max_remain_on_channel_duration = 5000; 4410 wiphy->max_remain_on_channel_duration = 5000;
4369 brcmf_wiphy_pno_params(wiphy); 4411 brcmf_wiphy_pno_params(wiphy);
@@ -4685,7 +4727,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
4685 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; 4727 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
4686 struct ieee80211_channel *chan; 4728 struct ieee80211_channel *chan;
4687 s32 err = 0; 4729 s32 err = 0;
4688 u16 reason;
4689 4730
4690 if (brcmf_is_apmode(ifp->vif)) { 4731 if (brcmf_is_apmode(ifp->vif)) {
4691 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data); 4732 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
@@ -4706,16 +4747,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
4706 brcmf_dbg(CONN, "Linkdown\n"); 4747 brcmf_dbg(CONN, "Linkdown\n");
4707 if (!brcmf_is_ibssmode(ifp->vif)) { 4748 if (!brcmf_is_ibssmode(ifp->vif)) {
4708 brcmf_bss_connect_done(cfg, ndev, e, false); 4749 brcmf_bss_connect_done(cfg, ndev, e, false);
4709 if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
4710 &ifp->vif->sme_state)) {
4711 reason = 0;
4712 if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
4713 (e->event_code == BRCMF_E_DISASSOC_IND)) &&
4714 (e->reason != WLAN_REASON_UNSPECIFIED))
4715 reason = e->reason;
4716 cfg80211_disconnected(ndev, reason, NULL, 0,
4717 GFP_KERNEL);
4718 }
4719 } 4750 }
4720 brcmf_link_down(ifp->vif); 4751 brcmf_link_down(ifp->vif);
4721 brcmf_init_prof(ndev_to_prof(ndev)); 4752 brcmf_init_prof(ndev_to_prof(ndev));
@@ -5215,6 +5246,9 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
5215 if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) && 5246 if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
5216 ch.bw == BRCMU_CHAN_BW_40) 5247 ch.bw == BRCMU_CHAN_BW_40)
5217 continue; 5248 continue;
5249 if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
5250 ch.bw == BRCMU_CHAN_BW_80)
5251 continue;
5218 update = false; 5252 update = false;
5219 for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) { 5253 for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
5220 if (band_chan_arr[j].hw_value == ch.chnum) { 5254 if (band_chan_arr[j].hw_value == ch.chnum) {
@@ -5231,10 +5265,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
5231 ieee80211_channel_to_frequency(ch.chnum, band); 5265 ieee80211_channel_to_frequency(ch.chnum, band);
5232 band_chan_arr[index].hw_value = ch.chnum; 5266 band_chan_arr[index].hw_value = ch.chnum;
5233 5267
5234 if (ch.bw == BRCMU_CHAN_BW_40) { 5268 /* assuming the chanspecs order is HT20,
5235 /* assuming the order is HT20, HT40 Upper, 5269 * HT40 upper, HT40 lower, and VHT80.
5236 * HT40 lower from chanspecs 5270 */
5237 */ 5271 if (ch.bw == BRCMU_CHAN_BW_80) {
5272 band_chan_arr[index].flags &=
5273 ~IEEE80211_CHAN_NO_80MHZ;
5274 } else if (ch.bw == BRCMU_CHAN_BW_40) {
5238 ht40_flag = band_chan_arr[index].flags & 5275 ht40_flag = band_chan_arr[index].flags &
5239 IEEE80211_CHAN_NO_HT40; 5276 IEEE80211_CHAN_NO_HT40;
5240 if (ch.sb == BRCMU_CHAN_SB_U) { 5277 if (ch.sb == BRCMU_CHAN_SB_U) {
@@ -5255,8 +5292,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
5255 IEEE80211_CHAN_NO_HT40MINUS; 5292 IEEE80211_CHAN_NO_HT40MINUS;
5256 } 5293 }
5257 } else { 5294 } else {
5295 /* disable other bandwidths for now as the mentioned
5296 * order assures they are enabled for subsequent
5297 * chanspecs.
5298 */
5258 band_chan_arr[index].flags = 5299 band_chan_arr[index].flags =
5259 IEEE80211_CHAN_NO_HT40; 5300 IEEE80211_CHAN_NO_HT40 |
5301 IEEE80211_CHAN_NO_80MHZ;
5260 ch.bw = BRCMU_CHAN_BW_20; 5302 ch.bw = BRCMU_CHAN_BW_20;
5261 cfg->d11inf.encchspec(&ch); 5303 cfg->d11inf.encchspec(&ch);
5262 channel = ch.chspec; 5304 channel = ch.chspec;
@@ -5323,13 +5365,63 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
5323 } 5365 }
5324} 5366}
5325 5367
5368static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
5369 u32 bw_cap[2], u32 nchain)
5370{
5371 band->ht_cap.ht_supported = true;
5372 if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
5373 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
5374 band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
5375 }
5376 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
5377 band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
5378 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
5379 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
5380 memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
5381 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
5382}
5383
5384static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
5385{
5386 u16 mcs_map;
5387 int i;
5388
5389 for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
5390 mcs_map = (mcs_map << 2) | supp;
5391
5392 return cpu_to_le16(mcs_map);
5393}
5394
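Worked numbers for the helper above: with nchain = 2 and supp = IEEE80211_VHT_MCS_SUPPORT_0_9 (value 2), the loop shifts the 16-bit map from 0xFFFF to 0xFFFE and then 0xFFFA, i.e. the two spatial streams each advertise 0b10 (MCS 0-9) in the two-bits-per-stream VHT MCS map while streams 3-8 remain 0b11 (not supported).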
5395static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
5396 u32 bw_cap[2], u32 nchain)
5397{
5398 __le16 mcs_map;
5399
5400 /* not allowed in 2.4G band */
5401 if (band->band == IEEE80211_BAND_2GHZ)
5402 return;
5403
5404 band->vht_cap.vht_supported = true;
5405 /* 80MHz is mandatory */
5406 band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
5407 if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
5408 band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
5409 band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
5410 }
5411 /* all support 256-QAM */
5412 mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
5413 band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
5414 band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
5415}
5416
5326static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg) 5417static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5327{ 5418{
5328 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 5419 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
5329 struct wiphy *wiphy; 5420 struct wiphy *wiphy;
5330 s32 phy_list; 5421 s32 phy_list;
5331 u32 band_list[3]; 5422 u32 band_list[3];
5332 u32 nmode; 5423 u32 nmode = 0;
5424 u32 vhtmode = 0;
5333 u32 bw_cap[2] = { 0, 0 }; 5425 u32 bw_cap[2] = { 0, 0 };
5334 u32 rxchain; 5426 u32 rxchain;
5335 u32 nchain; 5427 u32 nchain;
@@ -5360,14 +5452,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5360 brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n", 5452 brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
5361 band_list[0], band_list[1], band_list[2]); 5453 band_list[0], band_list[1], band_list[2]);
5362 5454
5455 (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
5363 err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode); 5456 err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
5364 if (err) { 5457 if (err) {
5365 brcmf_err("nmode error (%d)\n", err); 5458 brcmf_err("nmode error (%d)\n", err);
5366 } else { 5459 } else {
5367 brcmf_get_bwcap(ifp, bw_cap); 5460 brcmf_get_bwcap(ifp, bw_cap);
5368 } 5461 }
5369 brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode, 5462 brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
5370 bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]); 5463 nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
5464 bw_cap[IEEE80211_BAND_5GHZ]);
5371 5465
5372 err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain); 5466 err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
5373 if (err) { 5467 if (err) {
@@ -5398,17 +5492,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5398 else 5492 else
5399 continue; 5493 continue;
5400 5494
5401 if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) { 5495 if (nmode)
5402 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 5496 brcmf_update_ht_cap(band, bw_cap, nchain);
5403 band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 5497 if (vhtmode)
5404 } 5498 brcmf_update_vht_cap(band, bw_cap, nchain);
5405 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
5406 band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
5407 band->ht_cap.ht_supported = true;
5408 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
5409 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
5410 memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
5411 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
5412 bands[band->band] = band; 5499 bands[band->band] = band;
5413 } 5500 }
5414 5501
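In the band loop above, capabilities are now gated on firmware feature iovars: nmode enables brcmf_update_ht_cap(), vhtmode enables brcmf_update_vht_cap(). The (void) cast on the vhtmode read is deliberate — firmware without that iovar fails the call, vhtmode stays 0 and VHT is simply skipped. The probe pattern, condensed (a sketch, not a verbatim excerpt):

	u32 vhtmode = 0;

	(void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode); /* may fail */
	if (vhtmode)
		brcmf_update_vht_cap(band, bw_cap, nchain); /* 5 GHz bands only */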
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 8c5fa4e58139..43c71bfaa474 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
897 return result; 897 return result;
898} 898}
899 899
900static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 900static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
901 u32 queues, bool drop)
901{ 902{
902 struct brcms_info *wl = hw->priv; 903 struct brcms_info *wl = hw->priv;
903 int ret; 904 int ret;
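Note: the two-line change above is part of a mac80211-wide update in this merge — the .flush callback now receives the ieee80211_vif being flushed. The same mechanical edit recurs below for cw1200, iwlegacy and iwldvm; all of these drivers ignore the new argument and keep flushing every queue. The callback shape after the change, as used by these hunks:

	/* member of struct ieee80211_ops */
	void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      u32 queues, bool drop);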
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9417cb5a2553..af8ba64ace39 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4870,14 +4870,11 @@ static void brcms_c_detach_module(struct brcms_c_info *wlc)
4870/* 4870/*
4871 * low level detach 4871 * low level detach
4872 */ 4872 */
4873static int brcms_b_detach(struct brcms_c_info *wlc) 4873static void brcms_b_detach(struct brcms_c_info *wlc)
4874{ 4874{
4875 uint i; 4875 uint i;
4876 struct brcms_hw_band *band; 4876 struct brcms_hw_band *band;
4877 struct brcms_hardware *wlc_hw = wlc->hw; 4877 struct brcms_hardware *wlc_hw = wlc->hw;
4878 int callbacks;
4879
4880 callbacks = 0;
4881 4878
4882 brcms_b_detach_dmapio(wlc_hw); 4879 brcms_b_detach_dmapio(wlc_hw);
4883 4880
@@ -4900,9 +4897,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
4900 ai_detach(wlc_hw->sih); 4897 ai_detach(wlc_hw->sih);
4901 wlc_hw->sih = NULL; 4898 wlc_hw->sih = NULL;
4902 } 4899 }
4903
4904 return callbacks;
4905
4906} 4900}
4907 4901
4908/* 4902/*
@@ -4917,14 +4911,15 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
4917 */ 4911 */
4918uint brcms_c_detach(struct brcms_c_info *wlc) 4912uint brcms_c_detach(struct brcms_c_info *wlc)
4919{ 4913{
4920 uint callbacks = 0; 4914 uint callbacks;
4921 4915
4922 if (wlc == NULL) 4916 if (wlc == NULL)
4923 return 0; 4917 return 0;
4924 4918
4925 callbacks += brcms_b_detach(wlc); 4919 brcms_b_detach(wlc);
4926 4920
4927 /* delete software timers */ 4921 /* delete software timers */
4922 callbacks = 0;
4928 if (!brcms_c_radio_monitor_stop(wlc)) 4923 if (!brcms_c_radio_monitor_stop(wlc))
4929 callbacks++; 4924 callbacks++;
4930 4925
diff --git a/drivers/net/wireless/brcm80211/brcmutil/d11.c b/drivers/net/wireless/brcm80211/brcmutil/d11.c
index 30e54e2c6c9b..2b2522bdd8eb 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/d11.c
@@ -21,19 +21,46 @@
21#include <brcmu_wifi.h> 21#include <brcmu_wifi.h>
22#include <brcmu_d11.h> 22#include <brcmu_d11.h>
23 23
24static void brcmu_d11n_encchspec(struct brcmu_chan *ch) 24static u16 d11n_sb(enum brcmu_chan_sb sb)
25{ 25{
26 ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK; 26 switch (sb) {
27 case BRCMU_CHAN_SB_NONE:
28 return BRCMU_CHSPEC_D11N_SB_N;
29 case BRCMU_CHAN_SB_L:
30 return BRCMU_CHSPEC_D11N_SB_L;
31 case BRCMU_CHAN_SB_U:
32 return BRCMU_CHSPEC_D11N_SB_U;
33 default:
34 WARN_ON(1);
35 }
36 return 0;
37}
27 38
28 switch (ch->bw) { 39static u16 d11n_bw(enum brcmu_chan_bw bw)
40{
41 switch (bw) {
29 case BRCMU_CHAN_BW_20: 42 case BRCMU_CHAN_BW_20:
30 ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N; 43 return BRCMU_CHSPEC_D11N_BW_20;
31 break;
32 case BRCMU_CHAN_BW_40: 44 case BRCMU_CHAN_BW_40:
45 return BRCMU_CHSPEC_D11N_BW_40;
33 default: 46 default:
34 WARN_ON_ONCE(1); 47 WARN_ON(1);
35 break;
36 } 48 }
49 return 0;
50}
51
52static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
53{
54 if (ch->bw == BRCMU_CHAN_BW_20)
55 ch->sb = BRCMU_CHAN_SB_NONE;
56
57 ch->chspec = 0;
58 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
59 BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
60 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK,
61 0, d11n_sb(ch->sb));
62 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK,
63 0, d11n_bw(ch->bw));
37 64
38 if (ch->chnum <= CH_MAX_2G_CHANNEL) 65 if (ch->chnum <= CH_MAX_2G_CHANNEL)
39 ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G; 66 ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
@@ -41,23 +68,34 @@ static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
41 ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G; 68 ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
42} 69}
43 70
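The rewrite above replaces open-coded OR-ing with brcmu_maskset16(), so every chanspec field is masked before being set and re-encoding an already populated chspec cannot leave stale bits behind. The helper behaves like this (paraphrased from brcmu_utils.h; the exact body may differ):

	static inline void brcmu_maskset16(u16 *var, u16 mask, u8 shift,
					   u16 value)
	{
		*var = (*var & ~mask) | ((value << shift) & mask);
	}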
44static void brcmu_d11ac_encchspec(struct brcmu_chan *ch) 71static u16 d11ac_bw(enum brcmu_chan_bw bw)
45{ 72{
46 ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK; 73 switch (bw) {
47
48 switch (ch->bw) {
49 case BRCMU_CHAN_BW_20: 74 case BRCMU_CHAN_BW_20:
50 ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20; 75 return BRCMU_CHSPEC_D11AC_BW_20;
51 break;
52 case BRCMU_CHAN_BW_40: 76 case BRCMU_CHAN_BW_40:
77 return BRCMU_CHSPEC_D11AC_BW_40;
53 case BRCMU_CHAN_BW_80: 78 case BRCMU_CHAN_BW_80:
54 case BRCMU_CHAN_BW_80P80: 79 return BRCMU_CHSPEC_D11AC_BW_80;
55 case BRCMU_CHAN_BW_160:
56 default: 80 default:
57 WARN_ON_ONCE(1); 81 WARN_ON(1);
58 break;
59 } 82 }
83 return 0;
84}
60 85
86static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
87{
88 if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE)
89 ch->sb = BRCMU_CHAN_SB_L;
90
91 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
92 BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
93 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
94 BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb);
95 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK,
96 0, d11ac_bw(ch->bw));
97
98 ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
61 if (ch->chnum <= CH_MAX_2G_CHANNEL) 99 if (ch->chnum <= CH_MAX_2G_CHANNEL)
62 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G; 100 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
63 else 101 else
@@ -73,6 +111,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
73 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) { 111 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
74 case BRCMU_CHSPEC_D11N_BW_20: 112 case BRCMU_CHSPEC_D11N_BW_20:
75 ch->bw = BRCMU_CHAN_BW_20; 113 ch->bw = BRCMU_CHAN_BW_20;
114 ch->sb = BRCMU_CHAN_SB_NONE;
76 break; 115 break;
77 case BRCMU_CHSPEC_D11N_BW_40: 116 case BRCMU_CHSPEC_D11N_BW_40:
78 ch->bw = BRCMU_CHAN_BW_40; 117 ch->bw = BRCMU_CHAN_BW_40;
@@ -112,6 +151,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
112 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) { 151 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
113 case BRCMU_CHSPEC_D11AC_BW_20: 152 case BRCMU_CHSPEC_D11AC_BW_20:
114 ch->bw = BRCMU_CHAN_BW_20; 153 ch->bw = BRCMU_CHAN_BW_20;
154 ch->sb = BRCMU_CHAN_SB_NONE;
115 break; 155 break;
116 case BRCMU_CHSPEC_D11AC_BW_40: 156 case BRCMU_CHSPEC_D11AC_BW_40:
117 ch->bw = BRCMU_CHAN_BW_40; 157 ch->bw = BRCMU_CHAN_BW_40;
@@ -128,6 +168,25 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
128 break; 168 break;
129 case BRCMU_CHSPEC_D11AC_BW_80: 169 case BRCMU_CHSPEC_D11AC_BW_80:
130 ch->bw = BRCMU_CHAN_BW_80; 170 ch->bw = BRCMU_CHAN_BW_80;
171 ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
172 BRCMU_CHSPEC_D11AC_SB_SHIFT);
173 switch (ch->sb) {
174 case BRCMU_CHAN_SB_LL:
175 ch->chnum -= CH_30MHZ_APART;
176 break;
177 case BRCMU_CHAN_SB_LU:
178 ch->chnum -= CH_10MHZ_APART;
179 break;
180 case BRCMU_CHAN_SB_UL:
181 ch->chnum += CH_10MHZ_APART;
182 break;
183 case BRCMU_CHAN_SB_UU:
184 ch->chnum += CH_30MHZ_APART;
185 break;
186 default:
187 WARN_ON_ONCE(1);
188 break;
189 }
131 break; 190 break;
132 case BRCMU_CHSPEC_D11AC_BW_8080: 191 case BRCMU_CHSPEC_D11AC_BW_8080:
133 case BRCMU_CHSPEC_D11AC_BW_160: 192 case BRCMU_CHSPEC_D11AC_BW_160:
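For 80 MHz chanspecs the channel field carries the centre channel, so the decode above shifts chnum by the sideband offset to recover the 20 MHz control channel. A worked example, assuming centre channel 42 (the 36-48 block) and the CH_*MHZ_APART constants added further down:

	/* centre 42, BW 80:
	 *   SB_LL: 42 - CH_30MHZ_APART (6) = 36
	 *   SB_LU: 42 - CH_10MHZ_APART (2) = 40
	 *   SB_UL: 42 + CH_10MHZ_APART (2) = 44
	 *   SB_UU: 42 + CH_30MHZ_APART (6) = 48
	 */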
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 8660a2cba098..f9745ea8b3e0 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -108,13 +108,7 @@ enum brcmu_chan_bw {
108}; 108};
109 109
110enum brcmu_chan_sb { 110enum brcmu_chan_sb {
111 BRCMU_CHAN_SB_NONE = 0, 111 BRCMU_CHAN_SB_NONE = -1,
112 BRCMU_CHAN_SB_L,
113 BRCMU_CHAN_SB_U,
114 BRCMU_CHAN_SB_LL,
115 BRCMU_CHAN_SB_LU,
116 BRCMU_CHAN_SB_UL,
117 BRCMU_CHAN_SB_UU,
118 BRCMU_CHAN_SB_LLL, 112 BRCMU_CHAN_SB_LLL,
119 BRCMU_CHAN_SB_LLU, 113 BRCMU_CHAN_SB_LLU,
120 BRCMU_CHAN_SB_LUL, 114 BRCMU_CHAN_SB_LUL,
@@ -123,6 +117,12 @@ enum brcmu_chan_sb {
123 BRCMU_CHAN_SB_ULU, 117 BRCMU_CHAN_SB_ULU,
124 BRCMU_CHAN_SB_UUL, 118 BRCMU_CHAN_SB_UUL,
125 BRCMU_CHAN_SB_UUU, 119 BRCMU_CHAN_SB_UUU,
120 BRCMU_CHAN_SB_L = BRCMU_CHAN_SB_LLL,
121 BRCMU_CHAN_SB_U = BRCMU_CHAN_SB_LLU,
122 BRCMU_CHAN_SB_LL = BRCMU_CHAN_SB_LLL,
123 BRCMU_CHAN_SB_LU = BRCMU_CHAN_SB_LLU,
124 BRCMU_CHAN_SB_UL = BRCMU_CHAN_SB_LUL,
125 BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
126}; 126};
127 127
128struct brcmu_chan { 128struct brcmu_chan {
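The enum rework above takes BRCMU_CHAN_SB_NONE out of the value space (-1) and defines the 40 MHz (L/U) and 80 MHz (LL..UU) sidebands as aliases of the 160 MHz triples, so one numbering matches the hardware sideband field at every bandwidth. With BRCMU_CHAN_SB_LLL therefore starting at 0, the values work out to:

	/* SB_LLL=0 SB_LLU=1 SB_LUL=2 SB_LUU=3
	 * SB_ULL=4 SB_ULU=5 SB_UUL=6 SB_UUU=7
	 * hence SB_L == SB_LL == 0, SB_U == SB_LU == 1,
	 *       SB_UL == 2, SB_UU == 3
	 */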
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 74419d4bd123..76b5d3a86294 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -29,6 +29,7 @@
29#define CH_UPPER_SB 0x01 29#define CH_UPPER_SB 0x01
30#define CH_LOWER_SB 0x02 30#define CH_LOWER_SB 0x02
31#define CH_EWA_VALID 0x04 31#define CH_EWA_VALID 0x04
32#define CH_30MHZ_APART 6
32#define CH_20MHZ_APART 4 33#define CH_20MHZ_APART 4
33#define CH_10MHZ_APART 2 34#define CH_10MHZ_APART 2
 34 #define CH_5MHZ_APART 1 /* 2G band channels are 5 MHz apart */ 35 #define CH_5MHZ_APART 1 /* 2G band channels are 5 MHz apart */
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 103f7bce8932..cd0cad7f7759 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
936 return ret; 936 return ret;
937} 937}
938 938
939void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 939void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
940 u32 queues, bool drop)
940{ 941{
941 struct cw1200_common *priv = hw->priv; 942 struct cw1200_common *priv = hw->priv;
942 943
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
index 35babb62cc6a..b7e386b7662b 100644
--- a/drivers/net/wireless/cw1200/sta.h
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
40 40
41int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value); 41int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
42 42
43void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop); 43void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
44 u32 queues, bool drop);
44 45
45u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, 46u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
46 struct netdev_hw_addr_list *mc_list); 47 struct netdev_hw_addr_list *mc_list);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 67db34e56d7e..52919ad42726 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -882,7 +882,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
882 dev->mtu = local->mtu; 882 dev->mtu = local->mtu;
883 883
884 884
885 SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops); 885 dev->ethtool_ops = &prism2_ethtool_ops;
886 886
887} 887}
888 888
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index d37a6fd90d40..b598e2803500 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
573 rx_status.flag |= RX_FLAG_SHORTPRE; 573 rx_status.flag |= RX_FLAG_SHORTPRE;
574 574
575 if ((unlikely(rx_stats->phy_count > 20))) { 575 if ((unlikely(rx_stats->phy_count > 20))) {
576 D_DROP("dsp size out of range [0,20]: %d/n", 576 D_DROP("dsp size out of range [0,20]: %d\n",
577 rx_stats->phy_count); 577 rx_stats->phy_count);
578 return; 578 return;
579 } 579 }
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 888ad5c74639..c159c05db6ef 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
670 } 670 }
671 671
672 if ((unlikely(phy_res->cfg_phy_cnt > 20))) { 672 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
673 D_DROP("dsp size out of range [0,20]: %d/n", 673 D_DROP("dsp size out of range [0,20]: %d\n",
674 phy_res->cfg_phy_cnt); 674 phy_res->cfg_phy_cnt);
675 return; 675 return;
676 } 676 }
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 4f42174d9994..ecc674627e6e 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4755,7 +4755,8 @@ out:
4755} 4755}
4756EXPORT_SYMBOL(il_mac_change_interface); 4756EXPORT_SYMBOL(il_mac_change_interface);
4757 4757
4758void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 4758void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4759 u32 queues, bool drop)
4759{ 4760{
4760 struct il_priv *il = hw->priv; 4761 struct il_priv *il = hw->priv;
4761 unsigned long timeout = jiffies + msecs_to_jiffies(500); 4762 unsigned long timeout = jiffies + msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index dfb13c70efe8..ea5c0f863c4e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
1723 struct ieee80211_vif *vif); 1723 struct ieee80211_vif *vif);
1724int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1724int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1725 enum nl80211_iftype newtype, bool newp2p); 1725 enum nl80211_iftype newtype, bool newp2p);
1726void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop); 1726void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1727 u32 queues, bool drop);
1727int il_alloc_txq_mem(struct il_priv *il); 1728int il_alloc_txq_mem(struct il_priv *il);
1728void il_free_txq_mem(struct il_priv *il); 1729void il_free_txq_mem(struct il_priv *il);
1729 1730
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 74b3b4de7bb7..7fd50428b934 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -2,10 +2,6 @@ config IWLWIFI
2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
3 depends on PCI && MAC80211 && HAS_IOMEM 3 depends on PCI && MAC80211 && HAS_IOMEM
4 select FW_LOADER 4 select FW_LOADER
5 select NEW_LEDS
6 select LEDS_CLASS
7 select LEDS_TRIGGERS
8 select MAC80211_LEDS
9 ---help--- 5 ---help---
10 Select to build the driver supporting the: 6 Select to build the driver supporting the:
11 7
@@ -43,6 +39,14 @@ config IWLWIFI
43 say M here and read <file:Documentation/kbuild/modules.txt>. The 39 say M here and read <file:Documentation/kbuild/modules.txt>. The
44 module will be called iwlwifi. 40 module will be called iwlwifi.
45 41
42config IWLWIFI_LEDS
43 bool
44 depends on IWLWIFI
45 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
46 select LEDS_TRIGGERS
47 select MAC80211_LEDS
48 default y
49
46config IWLDVM 50config IWLDVM
47 tristate "Intel Wireless WiFi DVM Firmware support" 51 tristate "Intel Wireless WiFi DVM Firmware support"
48 depends on IWLWIFI 52 depends on IWLWIFI
@@ -124,7 +128,6 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
124 Enable use of experimental ucode for testing and debugging. 128 Enable use of experimental ucode for testing and debugging.
125 129
126config IWLWIFI_DEVICE_TRACING 130config IWLWIFI_DEVICE_TRACING
127
128 bool "iwlwifi device access tracing" 131 bool "iwlwifi device access tracing"
129 depends on IWLWIFI 132 depends on IWLWIFI
130 depends on EVENT_TRACING 133 depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
index dce7ab2e0c4b..4d19685f31c3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -4,9 +4,10 @@ iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o 4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
5 5
6iwldvm-objs += power.o 6iwldvm-objs += power.o
7iwldvm-objs += scan.o led.o 7iwldvm-objs += scan.o
8iwldvm-objs += rxon.o devices.o 8iwldvm-objs += rxon.o devices.o
9 9
10iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
10iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o 11iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
11 12
12ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ 13ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index be1086c87157..20e6aa910700 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -94,7 +94,6 @@ int iwl_send_calib_results(struct iwl_priv *priv)
94{ 94{
95 struct iwl_host_cmd hcmd = { 95 struct iwl_host_cmd hcmd = {
96 .id = REPLY_PHY_CALIBRATION_CMD, 96 .id = REPLY_PHY_CALIBRATION_CMD,
97 .flags = CMD_SYNC,
98 }; 97 };
99 struct iwl_calib_result *res; 98 struct iwl_calib_result *res;
100 99
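The dropped .flags = CMD_SYNC here is the first of many such edits in this merge: iwlwifi retires the CMD_SYNC flag and treats a zero flags word as synchronous, so every CMD_SYNC below becomes 0 or the initializer vanishes, while asynchronous sends keep naming CMD_ASYNC explicitly. The resulting convention (a hedged paraphrase, not a quote from the tree):

	struct iwl_host_cmd cmd = {
		.id = REPLY_ECHO,	/* .flags omitted: synchronous */
	};
	/* fire-and-forget senders set .flags = CMD_ASYNC explicitly */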
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d2fe2596d54e..0ffb6ff1a255 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -1481,7 +1481,7 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1481 1481
1482 /* make request to uCode to retrieve statistics information */ 1482 /* make request to uCode to retrieve statistics information */
1483 mutex_lock(&priv->mutex); 1483 mutex_lock(&priv->mutex);
1484 ret = iwl_send_statistics_request(priv, CMD_SYNC, false); 1484 ret = iwl_send_statistics_request(priv, 0, false);
1485 mutex_unlock(&priv->mutex); 1485 mutex_unlock(&priv->mutex);
1486 1486
1487 if (ret) 1487 if (ret)
@@ -1868,7 +1868,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
1868 1868
1869 /* make request to uCode to retrieve statistics information */ 1869 /* make request to uCode to retrieve statistics information */
1870 mutex_lock(&priv->mutex); 1870 mutex_lock(&priv->mutex);
1871 iwl_send_statistics_request(priv, CMD_SYNC, true); 1871 iwl_send_statistics_request(priv, 0, true);
1872 mutex_unlock(&priv->mutex); 1872 mutex_unlock(&priv->mutex);
1873 1873
1874 return count; 1874 return count;
@@ -2188,7 +2188,6 @@ static int iwl_cmd_echo_test(struct iwl_priv *priv)
2188 struct iwl_host_cmd cmd = { 2188 struct iwl_host_cmd cmd = {
2189 .id = REPLY_ECHO, 2189 .id = REPLY_ECHO,
2190 .len = { 0 }, 2190 .len = { 0 },
2191 .flags = CMD_SYNC,
2192 }; 2191 };
2193 2192
2194 ret = iwl_dvm_send_cmd(priv, &cmd); 2193 ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -2320,7 +2319,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2320 mutex_lock(&priv->mutex); 2319 mutex_lock(&priv->mutex);
2321 2320
2322 /* take the return value to make compiler happy - it will fail anyway */ 2321 /* take the return value to make compiler happy - it will fail anyway */
2323 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL); 2322 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL);
2324 2323
2325 mutex_unlock(&priv->mutex); 2324 mutex_unlock(&priv->mutex);
2326 2325
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 3441f70d0ff9..a6f22c32a279 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -888,9 +888,11 @@ struct iwl_priv {
888 888
889 struct iwl_event_log event_log; 889 struct iwl_event_log event_log;
890 890
891#ifdef CONFIG_IWLWIFI_LEDS
891 struct led_classdev led; 892 struct led_classdev led;
892 unsigned long blink_on, blink_off; 893 unsigned long blink_on, blink_off;
893 bool led_registered; 894 bool led_registered;
895#endif
894 896
895 /* WoWLAN GTK rekey data */ 897 /* WoWLAN GTK rekey data */
896 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; 898 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 758c54eeb206..34b41e5f7cfc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -417,7 +417,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
417 struct iwl_host_cmd hcmd = { 417 struct iwl_host_cmd hcmd = {
418 .id = REPLY_CHANNEL_SWITCH, 418 .id = REPLY_CHANNEL_SWITCH,
419 .len = { sizeof(cmd), }, 419 .len = { sizeof(cmd), },
420 .flags = CMD_SYNC,
421 .data = { &cmd, }, 420 .data = { &cmd, },
422 }; 421 };
423 422
@@ -579,7 +578,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
579 struct iwl_host_cmd hcmd = { 578 struct iwl_host_cmd hcmd = {
580 .id = REPLY_CHANNEL_SWITCH, 579 .id = REPLY_CHANNEL_SWITCH,
581 .len = { sizeof(*cmd), }, 580 .len = { sizeof(*cmd), },
582 .flags = CMD_SYNC,
583 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 581 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
584 }; 582 };
585 int err; 583 int err;
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index 6a0817d9c4fa..1c6b2252d0f2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -36,8 +36,20 @@ struct iwl_priv;
36#define IWL_LED_ACTIVITY (0<<1) 36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1) 37#define IWL_LED_LINK (1<<1)
38 38
39#ifdef CONFIG_IWLWIFI_LEDS
39void iwlagn_led_enable(struct iwl_priv *priv); 40void iwlagn_led_enable(struct iwl_priv *priv);
40void iwl_leds_init(struct iwl_priv *priv); 41void iwl_leds_init(struct iwl_priv *priv);
41void iwl_leds_exit(struct iwl_priv *priv); 42void iwl_leds_exit(struct iwl_priv *priv);
43#else
44static inline void iwlagn_led_enable(struct iwl_priv *priv)
45{
46}
47static inline void iwl_leds_init(struct iwl_priv *priv)
48{
49}
50static inline void iwl_leds_exit(struct iwl_priv *priv)
51{
52}
53#endif
42 54
43#endif /* __iwl_leds_h__ */ 55#endif /* __iwl_leds_h__ */
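led.h now shows the standard optional-feature stub pattern: real prototypes when CONFIG_IWLWIFI_LEDS is set, empty static inlines otherwise, so callers never need #ifdef. The hidden Kconfig bool added earlier (depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI) guarantees the LED class core is reachable whenever the real code replaces the stubs. Generic form of the pattern (names here are hypothetical):

	#ifdef CONFIG_FOO_LEDS
	void foo_leds_init(struct foo_priv *priv);
	#else
	static inline void foo_leds_init(struct foo_priv *priv) {}
	#endif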
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 576f7ee38ca5..2191621d69c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -81,7 +81,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
81 else 81 else
82 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; 82 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
83 83
84 return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC, 84 return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
85 sizeof(tx_power_cmd), &tx_power_cmd); 85 sizeof(tx_power_cmd), &tx_power_cmd);
86} 86}
87 87
@@ -141,7 +141,6 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
141 struct iwl_host_cmd cmd = { 141 struct iwl_host_cmd cmd = {
142 .id = REPLY_TXFIFO_FLUSH, 142 .id = REPLY_TXFIFO_FLUSH,
143 .len = { sizeof(struct iwl_txfifo_flush_cmd), }, 143 .len = { sizeof(struct iwl_txfifo_flush_cmd), },
144 .flags = CMD_SYNC,
145 .data = { &flush_cmd, }, 144 .data = { &flush_cmd, },
146 }; 145 };
147 146
@@ -180,7 +179,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
180 goto done; 179 goto done;
181 } 180 }
182 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); 181 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
183 iwl_trans_wait_tx_queue_empty(priv->trans); 182 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
184done: 183done:
185 ieee80211_wake_queues(priv->hw); 184 ieee80211_wake_queues(priv->hw);
186 mutex_unlock(&priv->mutex); 185 mutex_unlock(&priv->mutex);
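iwl_trans_wait_tx_queue_empty() grows a queue-bitmap argument in this series; 0xffffffff preserves the old wait-for-everything behaviour. Assuming bit n selects TX queue n, callers can now also wait selectively:

	iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);	/* all queues */
	iwl_trans_wait_tx_queue_empty(priv->trans, BIT(2));	/* queue 2 only */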
@@ -333,12 +332,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
333 memcpy(&bt_cmd_v2.basic, &basic, 332 memcpy(&bt_cmd_v2.basic, &basic,
334 sizeof(basic)); 333 sizeof(basic));
335 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 334 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
336 CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2); 335 0, sizeof(bt_cmd_v2), &bt_cmd_v2);
337 } else { 336 } else {
338 memcpy(&bt_cmd_v1.basic, &basic, 337 memcpy(&bt_cmd_v1.basic, &basic,
339 sizeof(basic)); 338 sizeof(basic));
340 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 339 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
341 CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1); 340 0, sizeof(bt_cmd_v1), &bt_cmd_v1);
342 } 341 }
343 if (ret) 342 if (ret)
344 IWL_ERR(priv, "failed to send BT Coex Config\n"); 343 IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -1044,7 +1043,6 @@ int iwlagn_send_patterns(struct iwl_priv *priv,
1044 struct iwl_host_cmd cmd = { 1043 struct iwl_host_cmd cmd = {
1045 .id = REPLY_WOWLAN_PATTERNS, 1044 .id = REPLY_WOWLAN_PATTERNS,
1046 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1045 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1047 .flags = CMD_SYNC,
1048 }; 1046 };
1049 int i, err; 1047 int i, err;
1050 1048
@@ -1201,7 +1199,6 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1201 if (key_data.use_rsc_tsc) { 1199 if (key_data.use_rsc_tsc) {
1202 struct iwl_host_cmd rsc_tsc_cmd = { 1200 struct iwl_host_cmd rsc_tsc_cmd = {
1203 .id = REPLY_WOWLAN_TSC_RSC_PARAMS, 1201 .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
1204 .flags = CMD_SYNC,
1205 .data[0] = key_data.rsc_tsc, 1202 .data[0] = key_data.rsc_tsc,
1206 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1203 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1207 .len[0] = sizeof(*key_data.rsc_tsc), 1204 .len[0] = sizeof(*key_data.rsc_tsc),
@@ -1215,7 +1212,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1215 if (key_data.use_tkip) { 1212 if (key_data.use_tkip) {
1216 ret = iwl_dvm_send_cmd_pdu(priv, 1213 ret = iwl_dvm_send_cmd_pdu(priv,
1217 REPLY_WOWLAN_TKIP_PARAMS, 1214 REPLY_WOWLAN_TKIP_PARAMS,
1218 CMD_SYNC, sizeof(tkip_cmd), 1215 0, sizeof(tkip_cmd),
1219 &tkip_cmd); 1216 &tkip_cmd);
1220 if (ret) 1217 if (ret)
1221 goto out; 1218 goto out;
@@ -1231,20 +1228,20 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1231 1228
1232 ret = iwl_dvm_send_cmd_pdu(priv, 1229 ret = iwl_dvm_send_cmd_pdu(priv,
1233 REPLY_WOWLAN_KEK_KCK_MATERIAL, 1230 REPLY_WOWLAN_KEK_KCK_MATERIAL,
1234 CMD_SYNC, sizeof(kek_kck_cmd), 1231 0, sizeof(kek_kck_cmd),
1235 &kek_kck_cmd); 1232 &kek_kck_cmd);
1236 if (ret) 1233 if (ret)
1237 goto out; 1234 goto out;
1238 } 1235 }
1239 } 1236 }
1240 1237
1241 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC, 1238 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
1242 sizeof(d3_cfg_cmd), &d3_cfg_cmd); 1239 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
1243 if (ret) 1240 if (ret)
1244 goto out; 1241 goto out;
1245 1242
1246 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER, 1243 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
1247 CMD_SYNC, sizeof(wakeup_filter_cmd), 1244 0, sizeof(wakeup_filter_cmd),
1248 &wakeup_filter_cmd); 1245 &wakeup_filter_cmd);
1249 if (ret) 1246 if (ret)
1250 goto out; 1247 goto out;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index dd55c9cf7ba8..29af7b51e370 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
1091 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 1091 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
1092} 1092}
1093 1093
1094static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1094static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1095 u32 queues, bool drop)
1095{ 1096{
1096 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1097 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1097 1098
@@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1119 } 1120 }
1120 } 1121 }
1121 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); 1122 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
1122 iwl_trans_wait_tx_queue_empty(priv->trans); 1123 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
1123done: 1124done:
1124 mutex_unlock(&priv->mutex); 1125 mutex_unlock(&priv->mutex);
1125 IWL_DEBUG_MAC80211(priv, "leave\n"); 1126 IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 6a6df71af1d7..0b7f46f0b079 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -128,7 +128,6 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
128 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 128 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
129 struct iwl_host_cmd cmd = { 129 struct iwl_host_cmd cmd = {
130 .id = REPLY_TX_BEACON, 130 .id = REPLY_TX_BEACON,
131 .flags = CMD_SYNC,
132 }; 131 };
133 struct ieee80211_tx_info *info; 132 struct ieee80211_tx_info *info;
134 u32 frame_size; 133 u32 frame_size;
@@ -311,8 +310,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
311 sizeof(struct iwl_statistics_cmd), 310 sizeof(struct iwl_statistics_cmd),
312 &statistics_cmd); 311 &statistics_cmd);
313 else 312 else
314 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 313 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
315 CMD_SYNC,
316 sizeof(struct iwl_statistics_cmd), 314 sizeof(struct iwl_statistics_cmd),
317 &statistics_cmd); 315 &statistics_cmd);
318} 316}
@@ -622,7 +620,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
622 620
623 ret = iwl_dvm_send_cmd_pdu(priv, 621 ret = iwl_dvm_send_cmd_pdu(priv,
624 REPLY_CT_KILL_CONFIG_CMD, 622 REPLY_CT_KILL_CONFIG_CMD,
625 CMD_SYNC, sizeof(adv_cmd), &adv_cmd); 623 0, sizeof(adv_cmd), &adv_cmd);
626 if (ret) 624 if (ret)
627 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); 625 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
628 else 626 else
@@ -637,7 +635,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
637 635
638 ret = iwl_dvm_send_cmd_pdu(priv, 636 ret = iwl_dvm_send_cmd_pdu(priv,
639 REPLY_CT_KILL_CONFIG_CMD, 637 REPLY_CT_KILL_CONFIG_CMD,
640 CMD_SYNC, sizeof(cmd), &cmd); 638 0, sizeof(cmd), &cmd);
641 if (ret) 639 if (ret)
642 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); 640 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
643 else 641 else
@@ -673,9 +671,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
673 671
674 if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) { 672 if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
675 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); 673 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
676 return iwl_dvm_send_cmd_pdu(priv, 674 return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
677 TX_ANT_CONFIGURATION_CMD,
678 CMD_SYNC,
679 sizeof(struct iwl_tx_ant_config_cmd), 675 sizeof(struct iwl_tx_ant_config_cmd),
680 &tx_ant_cmd); 676 &tx_ant_cmd);
681 } else { 677 } else {
@@ -703,7 +699,7 @@ static void iwl_send_bt_config(struct iwl_priv *priv)
703 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); 699 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
704 700
705 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 701 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
706 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd)) 702 0, sizeof(struct iwl_bt_cmd), &bt_cmd))
707 IWL_ERR(priv, "failed to send BT Coex Config\n"); 703 IWL_ERR(priv, "failed to send BT Coex Config\n");
708} 704}
709 705
@@ -987,7 +983,7 @@ static void iwl_bg_restart(struct work_struct *data)
987 ieee80211_restart_hw(priv->hw); 983 ieee80211_restart_hw(priv->hw);
988 else 984 else
989 IWL_ERR(priv, 985 IWL_ERR(priv,
 990 "Cannot request restart before registering with mac80211"); 986 "Cannot request restart before registering with mac80211\n");
991 } else { 987 } else {
992 WARN_ON(1); 988 WARN_ON(1);
993 } 989 }
@@ -1127,7 +1123,6 @@ static void iwl_option_config(struct iwl_priv *priv)
1127static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) 1123static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1128{ 1124{
1129 struct iwl_nvm_data *data = priv->nvm_data; 1125 struct iwl_nvm_data *data = priv->nvm_data;
1130 char *debug_msg;
1131 1126
1132 if (data->sku_cap_11n_enable && 1127 if (data->sku_cap_11n_enable &&
1133 !priv->cfg->ht_params) { 1128 !priv->cfg->ht_params) {
@@ -1141,8 +1136,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1141 return -EINVAL; 1136 return -EINVAL;
1142 } 1137 }
1143 1138
1144 debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n"; 1139 IWL_DEBUG_INFO(priv,
1145 IWL_DEBUG_INFO(priv, debug_msg, 1140 "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
1146 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled", 1141 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
1147 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled", 1142 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
1148 data->sku_cap_11n_enable ? "" : "NOT", "enabled"); 1143 data->sku_cap_11n_enable ? "" : "NOT", "enabled");
@@ -1350,7 +1345,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1350 iwl_set_hw_params(priv); 1345 iwl_set_hw_params(priv);
1351 1346
1352 if (!(priv->nvm_data->sku_cap_ipan_enable)) { 1347 if (!(priv->nvm_data->sku_cap_ipan_enable)) {
1353 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); 1348 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
1354 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1349 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1355 /* 1350 /*
1356 * if not PAN, then don't support P2P -- might be a uCode 1351 * if not PAN, then don't support P2P -- might be a uCode
@@ -2019,10 +2014,10 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
2019 2014
2020 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) { 2015 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
2021 if (!test_bit(mq, &priv->transport_queue_stop)) { 2016 if (!test_bit(mq, &priv->transport_queue_stop)) {
2022 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq); 2017 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
2023 ieee80211_wake_queue(priv->hw, mq); 2018 ieee80211_wake_queue(priv->hw, mq);
2024 } else { 2019 } else {
2025 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq); 2020 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
2026 } 2021 }
2027 } 2022 }
2028 2023
@@ -2053,6 +2048,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2053 return false; 2048 return false;
2054} 2049}
2055 2050
2051static void iwl_napi_add(struct iwl_op_mode *op_mode,
2052 struct napi_struct *napi,
2053 struct net_device *napi_dev,
2054 int (*poll)(struct napi_struct *, int),
2055 int weight)
2056{
2057 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2058
2059 ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
2060}
2061
2056static const struct iwl_op_mode_ops iwl_dvm_ops = { 2062static const struct iwl_op_mode_ops iwl_dvm_ops = {
2057 .start = iwl_op_mode_dvm_start, 2063 .start = iwl_op_mode_dvm_start,
2058 .stop = iwl_op_mode_dvm_stop, 2064 .stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2071,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
2065 .cmd_queue_full = iwl_cmd_queue_full, 2071 .cmd_queue_full = iwl_cmd_queue_full,
2066 .nic_config = iwl_nic_config, 2072 .nic_config = iwl_nic_config,
2067 .wimax_active = iwl_wimax_active, 2073 .wimax_active = iwl_wimax_active,
2074 .napi_add = iwl_napi_add,
2068}; 2075};
2069 2076
2070/***************************************************************************** 2077/*****************************************************************************
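The new .napi_add op lets the transport ask its op mode for a NAPI context; DVM forwards straight to ieee80211_napi_add() so the poller hangs off mac80211's net device. Conceptual flow (the transport-side caller is not in this hunk, so its exact spelling is an assumption):

	/* transport, roughly:
	 *	op_mode->ops->napi_add(op_mode, &rxq->napi, napi_dev, poll, 64);
	 * op mode, above:
	 *	ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
	 */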
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index b4e61417013a..f2c1439566b5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -278,7 +278,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
278 le32_to_cpu(cmd->sleep_interval[3]), 278 le32_to_cpu(cmd->sleep_interval[3]),
279 le32_to_cpu(cmd->sleep_interval[4])); 279 le32_to_cpu(cmd->sleep_interval[4]));
280 280
281 return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC, 281 return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0,
282 sizeof(struct iwl_powertable_cmd), cmd); 282 sizeof(struct iwl_powertable_cmd), cmd);
283} 283}
284 284
@@ -361,7 +361,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
361 361
362 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); 362 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
363 } else 363 } else
364 IWL_ERR(priv, "set power fail, ret = %d", ret); 364 IWL_ERR(priv, "set power fail, ret = %d\n", ret);
365 365
366 return ret; 366 return ret;
367} 367}
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index aa773a2da4ab..32b78a66536d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1453,7 +1453,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1453 tbl->action = IWL_LEGACY_SWITCH_SISO; 1453 tbl->action = IWL_LEGACY_SWITCH_SISO;
1454 break; 1454 break;
1455 default: 1455 default:
1456 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1456 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1457 break; 1457 break;
1458 } 1458 }
1459 1459
@@ -1628,7 +1628,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1628 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1628 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1629 break; 1629 break;
1630 default: 1630 default:
1631 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1631 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1632 break; 1632 break;
1633 } 1633 }
1634 1634
@@ -1799,7 +1799,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1799 tbl->action = IWL_MIMO2_SWITCH_SISO_A; 1799 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1800 break; 1800 break;
1801 default: 1801 default:
1802 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1802 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1803 break; 1803 break;
1804 } 1804 }
1805 1805
@@ -1969,7 +1969,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1969 tbl->action = IWL_MIMO3_SWITCH_SISO_A; 1969 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1970 break; 1970 break;
1971 default: 1971 default:
1972 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load); 1972 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1973 break; 1973 break;
1974 } 1974 }
1975 1975
@@ -2709,7 +2709,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2709 rs_set_expected_tpt_table(lq_sta, tbl); 2709 rs_set_expected_tpt_table(lq_sta, tbl);
2710 rs_fill_link_cmd(NULL, lq_sta, rate); 2710 rs_fill_link_cmd(NULL, lq_sta, rate);
2711 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; 2711 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2712 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true); 2712 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true);
2713} 2713}
2714 2714
2715static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, 2715static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index cd8377346aff..debec963c610 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -786,7 +786,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
786 786
787 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 787 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
788 788
789 ieee80211_rx_ni(priv->hw, skb); 789 ieee80211_rx(priv->hw, skb);
790} 790}
791 791
792static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) 792static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
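With RX presumably now driven from the NAPI poll loop wired up via .napi_add above, delivery happens in softirq context, so plain ieee80211_rx() replaces the process-context wrapper. For reference, mac80211's RX entry points and the contexts they expect (from the API as I understand it; check mac80211.h):

	/* ieee80211_rx(hw, skb)         - softirq, BHs already disabled
	 * ieee80211_rx_ni(hw, skb)      - process context, disables BHs itself
	 * ieee80211_rx_irqsafe(hw, skb) - any context, defers to a tasklet
	 */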
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 503a81e58185..ed50de6362ed 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -104,7 +104,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
104 104
105 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 105 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
106 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 106 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
107 CMD_SYNC, sizeof(*send), send); 107 0, sizeof(*send), send);
108 108
109 send->filter_flags = old_filter; 109 send->filter_flags = old_filter;
110 110
@@ -134,7 +134,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
134 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 134 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
135 send->dev_type = RXON_DEV_TYPE_P2P; 135 send->dev_type = RXON_DEV_TYPE_P2P;
136 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 136 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
137 CMD_SYNC, sizeof(*send), send); 137 0, sizeof(*send), send);
138 138
139 send->filter_flags = old_filter; 139 send->filter_flags = old_filter;
140 send->dev_type = old_dev_type; 140 send->dev_type = old_dev_type;
@@ -160,7 +160,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
160 int ret; 160 int ret;
161 161
162 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 162 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
163 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC, 163 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
164 sizeof(*send), send); 164 sizeof(*send), send);
165 165
166 send->filter_flags = old_filter; 166 send->filter_flags = old_filter;
@@ -189,7 +189,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
189 ctx->qos_data.qos_active, 189 ctx->qos_data.qos_active,
190 ctx->qos_data.def_qos_parm.qos_flags); 190 ctx->qos_data.def_qos_parm.qos_flags);
191 191
192 ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC, 192 ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
193 sizeof(struct iwl_qosparam_cmd), 193 sizeof(struct iwl_qosparam_cmd),
194 &ctx->qos_data.def_qos_parm); 194 &ctx->qos_data.def_qos_parm);
195 if (ret) 195 if (ret)
@@ -353,7 +353,7 @@ static int iwl_send_rxon_timing(struct iwl_priv *priv,
353 le16_to_cpu(ctx->timing.atim_window)); 353 le16_to_cpu(ctx->timing.atim_window));
354 354
355 return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, 355 return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
356 CMD_SYNC, sizeof(ctx->timing), &ctx->timing); 356 0, sizeof(ctx->timing), &ctx->timing);
357} 357}
358 358
359static int iwlagn_rxon_disconn(struct iwl_priv *priv, 359static int iwlagn_rxon_disconn(struct iwl_priv *priv,
@@ -495,7 +495,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
495 * Associated RXON doesn't clear the station table in uCode, 495 * Associated RXON doesn't clear the station table in uCode,
496 * so we don't need to restore stations etc. after this. 496 * so we don't need to restore stations etc. after this.
497 */ 497 */
498 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC, 498 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
499 sizeof(struct iwl_rxon_cmd), &ctx->staging); 499 sizeof(struct iwl_rxon_cmd), &ctx->staging);
500 if (ret) { 500 if (ret) {
501 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 501 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -610,7 +610,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
610 cmd.slots[0].width = cpu_to_le16(slot0); 610 cmd.slots[0].width = cpu_to_le16(slot0);
611 cmd.slots[1].width = cpu_to_le16(slot1); 611 cmd.slots[1].width = cpu_to_le16(slot1);
612 612
613 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC, 613 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
614 sizeof(cmd), &cmd); 614 sizeof(cmd), &cmd);
615 if (ret) 615 if (ret)
616 IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); 616 IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -823,7 +823,7 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
823 823
824 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) 824 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
825 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { 825 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
826 IWL_WARN(priv, "CCK and auto detect"); 826 IWL_WARN(priv, "CCK and auto detect\n");
827 errors |= BIT(8); 827 errors |= BIT(8);
828 } 828 }
829 829
@@ -1395,7 +1395,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1395 priv->phy_calib_chain_noise_reset_cmd); 1395 priv->phy_calib_chain_noise_reset_cmd);
1396 ret = iwl_dvm_send_cmd_pdu(priv, 1396 ret = iwl_dvm_send_cmd_pdu(priv,
1397 REPLY_PHY_CALIBRATION_CMD, 1397 REPLY_PHY_CALIBRATION_CMD,
1398 CMD_SYNC, sizeof(cmd), &cmd); 1398 0, sizeof(cmd), &cmd);
1399 if (ret) 1399 if (ret)
1400 IWL_ERR(priv, 1400 IWL_ERR(priv,
1401 "Could not send REPLY_PHY_CALIBRATION_CMD\n"); 1401 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index be98b913ed58..43bef901e8f9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -59,7 +59,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
59 int ret; 59 int ret;
60 struct iwl_host_cmd cmd = { 60 struct iwl_host_cmd cmd = {
61 .id = REPLY_SCAN_ABORT_CMD, 61 .id = REPLY_SCAN_ABORT_CMD,
62 .flags = CMD_SYNC | CMD_WANT_SKB, 62 .flags = CMD_WANT_SKB,
63 }; 63 };
64 __le32 *status; 64 __le32 *status;
65 65
@@ -639,7 +639,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
639 struct iwl_host_cmd cmd = { 639 struct iwl_host_cmd cmd = {
640 .id = REPLY_SCAN_CMD, 640 .id = REPLY_SCAN_CMD,
641 .len = { sizeof(struct iwl_scan_cmd), }, 641 .len = { sizeof(struct iwl_scan_cmd), },
642 .flags = CMD_SYNC,
643 }; 642 };
644 struct iwl_scan_cmd *scan; 643 struct iwl_scan_cmd *scan;
645 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 644 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 9cdd91cdf661..6ec86adbe4a1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -39,7 +39,7 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
39 lockdep_assert_held(&priv->sta_lock); 39 lockdep_assert_held(&priv->sta_lock);
40 40
41 if (sta_id >= IWLAGN_STATION_COUNT) { 41 if (sta_id >= IWLAGN_STATION_COUNT) {
42 IWL_ERR(priv, "invalid sta_id %u", sta_id); 42 IWL_ERR(priv, "invalid sta_id %u\n", sta_id);
43 return -EINVAL; 43 return -EINVAL;
44 } 44 }
45 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) 45 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
@@ -165,7 +165,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
165 iwl_free_resp(&cmd); 165 iwl_free_resp(&cmd);
166 166
167 if (cmd.handler_status) 167 if (cmd.handler_status)
168 IWL_ERR(priv, "%s - error in the CMD response %d", __func__, 168 IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
169 cmd.handler_status); 169 cmd.handler_status);
170 170
171 return cmd.handler_status; 171 return cmd.handler_status;
@@ -261,7 +261,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
261 cmd.station_flags = flags; 261 cmd.station_flags = flags;
262 cmd.sta.sta_id = sta_id; 262 cmd.sta.sta_id = sta_id;
263 263
264 return iwl_send_add_sta(priv, &cmd, CMD_SYNC); 264 return iwl_send_add_sta(priv, &cmd, 0);
265} 265}
266 266
267static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, 267static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
@@ -413,7 +413,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
413 spin_unlock_bh(&priv->sta_lock); 413 spin_unlock_bh(&priv->sta_lock);
414 414
415 /* Add station to device's station table */ 415 /* Add station to device's station table */
416 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 416 ret = iwl_send_add_sta(priv, &sta_cmd, 0);
417 if (ret) { 417 if (ret) {
418 spin_lock_bh(&priv->sta_lock); 418 spin_lock_bh(&priv->sta_lock);
419 IWL_ERR(priv, "Adding station %pM failed.\n", 419 IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -456,7 +456,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
456 struct iwl_host_cmd cmd = { 456 struct iwl_host_cmd cmd = {
457 .id = REPLY_REMOVE_STA, 457 .id = REPLY_REMOVE_STA,
458 .len = { sizeof(struct iwl_rem_sta_cmd), }, 458 .len = { sizeof(struct iwl_rem_sta_cmd), },
459 .flags = CMD_SYNC,
460 .data = { &rm_sta_cmd, }, 459 .data = { &rm_sta_cmd, },
461 }; 460 };
462 461
@@ -740,7 +739,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
740 send_lq = true; 739 send_lq = true;
741 } 740 }
742 spin_unlock_bh(&priv->sta_lock); 741 spin_unlock_bh(&priv->sta_lock);
743 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 742 ret = iwl_send_add_sta(priv, &sta_cmd, 0);
744 if (ret) { 743 if (ret) {
745 spin_lock_bh(&priv->sta_lock); 744 spin_lock_bh(&priv->sta_lock);
746 IWL_ERR(priv, "Adding station %pM failed.\n", 745 IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -756,8 +755,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
756 * current LQ command 755 * current LQ command
757 */ 756 */
758 if (send_lq) 757 if (send_lq)
759 iwl_send_lq_cmd(priv, ctx, &lq, 758 iwl_send_lq_cmd(priv, ctx, &lq, 0, true);
760 CMD_SYNC, true);
761 spin_lock_bh(&priv->sta_lock); 759 spin_lock_bh(&priv->sta_lock);
762 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 760 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
763 } 761 }
@@ -968,7 +966,7 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
968 return -ENOMEM; 966 return -ENOMEM;
969 } 967 }
970 968
971 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); 969 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true);
972 if (ret) 970 if (ret)
973 IWL_ERR(priv, "Link quality command failed (%d)\n", ret); 971 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
974 972
@@ -999,7 +997,6 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
999 struct iwl_host_cmd cmd = { 997 struct iwl_host_cmd cmd = {
1000 .id = ctx->wep_key_cmd, 998 .id = ctx->wep_key_cmd,
1001 .data = { wep_cmd, }, 999 .data = { wep_cmd, },
1002 .flags = CMD_SYNC,
1003 }; 1000 };
1004 1001
1005 might_sleep(); 1002 might_sleep();
@@ -1248,7 +1245,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1248 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; 1245 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
1249 sta_cmd.mode = STA_CONTROL_MODIFY_MSK; 1246 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
1250 1247
1251 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1248 return iwl_send_add_sta(priv, &sta_cmd, 0);
1252} 1249}
1253 1250
1254int iwl_set_dynamic_key(struct iwl_priv *priv, 1251int iwl_set_dynamic_key(struct iwl_priv *priv,
@@ -1284,13 +1281,13 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1284 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 1281 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1285 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 1282 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1286 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1283 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
1287 seq.tkip.iv32, p1k, CMD_SYNC); 1284 seq.tkip.iv32, p1k, 0);
1288 break; 1285 break;
1289 case WLAN_CIPHER_SUITE_CCMP: 1286 case WLAN_CIPHER_SUITE_CCMP:
1290 case WLAN_CIPHER_SUITE_WEP40: 1287 case WLAN_CIPHER_SUITE_WEP40:
1291 case WLAN_CIPHER_SUITE_WEP104: 1288 case WLAN_CIPHER_SUITE_WEP104:
1292 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1289 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
1293 0, NULL, CMD_SYNC); 1290 0, NULL, 0);
1294 break; 1291 break;
1295 default: 1292 default:
1296 IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher); 1293 IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
@@ -1409,7 +1406,7 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1409 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 1406 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1410 spin_unlock_bh(&priv->sta_lock); 1407 spin_unlock_bh(&priv->sta_lock);
1411 1408
1412 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1409 return iwl_send_add_sta(priv, &sta_cmd, 0);
1413} 1410}
1414 1411
1415int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, 1412int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1433,7 +1430,7 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1433 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 1430 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1434 spin_unlock_bh(&priv->sta_lock); 1431 spin_unlock_bh(&priv->sta_lock);
1435 1432
1436 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1433 return iwl_send_add_sta(priv, &sta_cmd, 0);
1437} 1434}
1438 1435
1439int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, 1436int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1458,7 +1455,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1458 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 1455 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1459 spin_unlock_bh(&priv->sta_lock); 1456 spin_unlock_bh(&priv->sta_lock);
1460 1457
1461 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 1458 return iwl_send_add_sta(priv, &sta_cmd, 0);
1462} 1459}
1463 1460
1464 1461
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index 058c5892c427..acb981a0a0aa 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -236,7 +236,7 @@ static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
 {
 	IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
 	/* make request to retrieve statistics information */
-	iwl_send_statistics_request(priv, CMD_SYNC, false);
+	iwl_send_statistics_request(priv, 0, false);
 	/* Reschedule the ct_kill wait timer */
 	mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
 		  jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 398dd096674c..3255a1723d17 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -402,10 +402,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 	/* aggregation is on for this <sta,tid> */
 	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
 	    tid_data->agg.state != IWL_AGG_ON) {
-		IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
-			" Tx flags = 0x%08x, agg.state = %d",
+		IWL_ERR(priv,
+			"TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
 			info->flags, tid_data->agg.state);
-		IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+		IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
 			sta_id, tid,
 			IEEE80211_SEQ_TO_SN(tid_data->seq_number));
 		goto drop_unlock_sta;
@@ -416,7 +416,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 	 */
 	if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
 		      tid_data->agg.state != IWL_AGG_OFF,
-		      "Tx while agg.state = %d", tid_data->agg.state))
+		      "Tx while agg.state = %d\n", tid_data->agg.state))
 		goto drop_unlock_sta;

 	seq_number = tid_data->seq_number;
@@ -778,8 +778,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
 	/* There are no packets for this RA / TID in the HW any more */
 	if (tid_data->agg.ssn == tid_data->next_reclaimed) {
 		IWL_DEBUG_TX_QUEUES(priv,
-				    "Can continue DELBA flow ssn = next_recl ="
-				    " %d", tid_data->next_reclaimed);
+				    "Can continue DELBA flow ssn = next_recl = %d\n",
+				    tid_data->next_reclaimed);
 		iwl_trans_txq_disable(priv->trans,
 				      tid_data->agg.txq_id);
 		iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
@@ -791,8 +791,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
 	/* There are no packets for this RA / TID in the HW any more */
 	if (tid_data->agg.ssn == tid_data->next_reclaimed) {
 		IWL_DEBUG_TX_QUEUES(priv,
-				    "Can continue ADDBA flow ssn = next_recl ="
-				    " %d", tid_data->next_reclaimed);
+				    "Can continue ADDBA flow ssn = next_recl = %d\n",
+				    tid_data->next_reclaimed);
 		tid_data->agg.state = IWL_AGG_STARTING;
 		ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
 	}
@@ -1216,8 +1216,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 	    ctx->vif->type == NL80211_IFTYPE_STATION) {
 		/* block and stop all queues */
 		priv->passive_no_rx = true;
-		IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
-				    "passive channel");
+		IWL_DEBUG_TX_QUEUES(priv,
+				    "stop all queues: passive channel\n");
 		ieee80211_stop_queues(priv->hw);

 		IWL_DEBUG_TX_REPLY(priv,
@@ -1271,7 +1271,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,

 	while (!skb_queue_empty(&skbs)) {
 		skb = __skb_dequeue(&skbs);
-		ieee80211_tx_status_ni(priv->hw, skb);
+		ieee80211_tx_status(priv->hw, skb);
 	}

 	return 0;
@@ -1411,7 +1411,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,

 	while (!skb_queue_empty(&reclaimed_skbs)) {
 		skb = __skb_dequeue(&reclaimed_skbs);
-		ieee80211_tx_status_ni(priv->hw, skb);
+		ieee80211_tx_status(priv->hw, skb);
 	}

 	return 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index cf03ef5619d9..d5cee1530597 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -172,7 +172,7 @@ static int iwl_send_wimax_coex(struct iwl_priv *priv)
 	memset(&coex_cmd, 0, sizeof(coex_cmd));

 	return iwl_dvm_send_cmd_pdu(priv,
-				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
+				COEX_PRIORITY_TABLE_CMD, 0,
 				sizeof(coex_cmd), &coex_cmd);
 }

@@ -205,7 +205,7 @@ void iwl_send_prio_tbl(struct iwl_priv *priv)
 	memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
 	       sizeof(iwl_bt_prio_tbl));
 	if (iwl_dvm_send_cmd_pdu(priv,
-				 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
+				 REPLY_BT_COEX_PRIO_TABLE, 0,
 				 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
 		IWL_ERR(priv, "failed to send BT prio tbl command\n");
 }
@@ -218,7 +218,7 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
 	env_cmd.action = action;
 	env_cmd.type = type;
 	ret = iwl_dvm_send_cmd_pdu(priv,
-				   REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
+				   REPLY_BT_COEX_PROT_ENV, 0,
 				   sizeof(env_cmd), &env_cmd);
 	if (ret)
 		IWL_ERR(priv, "failed to send BT env command\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 854ba84ccb73..c3817fae16c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
 	.led_compensation = 51,
 	.wd_timeout = IWL_WATCHDOG_DISABLED,
 	.max_event_log_size = 128,
+	.scd_chain_ext_wa = true,
 };

 static const struct iwl_ht_params iwl1000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 3e63323637f3..21e5d0843a62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
 	.wd_timeout = IWL_DEF_WD_TIMEOUT,
 	.max_event_log_size = 512,
 	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
 };


@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
 	.max_event_log_size = 512,
 	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
 };

 static const struct iwl_ht_params iwl2000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6674f2c4541c..332bbede39e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
 	.led_compensation = 51,
 	.wd_timeout = IWL_WATCHDOG_DISABLED,
 	.max_event_log_size = 512,
+	.scd_chain_ext_wa = true,
 };

 static const struct iwl_ht_params iwl5000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8048de90233f..8f2c3c8c6b84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
 	.wd_timeout = IWL_DEF_WD_TIMEOUT,
 	.max_event_log_size = 512,
 	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
 };

 static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
 	.wd_timeout = IWL_DEF_WD_TIMEOUT,
 	.max_event_log_size = 1024,
 	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
 };

 static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
 	.max_event_log_size = 512,
 	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
 };

 static const struct iwl_ht_params iwl6000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 4c2d4ef28b22..48730064da73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,12 +71,12 @@
 #define IWL3160_UCODE_API_MAX	9

 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK	8
-#define IWL3160_UCODE_API_OK	8
+#define IWL7260_UCODE_API_OK	9
+#define IWL3160_UCODE_API_OK	9

 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	7
-#define IWL3160_UCODE_API_MIN	7
+#define IWL7260_UCODE_API_MIN	8
+#define IWL3160_UCODE_API_MIN	8

 /* NVM versions */
 #define IWL7260_NVM_VERSION		0x0a1d
@@ -98,7 +98,7 @@
 #define NVM_HW_SECTION_NUM_FAMILY_7000	0

 static const struct iwl_base_params iwl7000_base_params = {
-	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
 	.pll_cfg_val = 0,
 	.shadow_ram_support = true,
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
 	.max_event_log_size = 512,
 	.shadow_reg_enable = true,
 	.pcie_l1_allowed = true,
+	.apmg_wake_up_wa = true,
 };

 static const struct iwl_ht_params iwl7000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index f5bd82b88592..51c41531d81d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -83,9 +83,10 @@
 #define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"

 #define NVM_HW_SECTION_NUM_FAMILY_8000		10
+#define DEFAULT_NVM_FILE_FAMILY_8000		"iwl_nvm_8000.bin"

 static const struct iwl_base_params iwl8000_base_params = {
-	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
 	.pll_cfg_val = 0,
 	.shadow_ram_support = true,
@@ -118,6 +119,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
 	.ht_params = &iwl8000_ht_params,
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+	.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
 };

 const struct iwl_cfg iwl8260_n_cfg = {
@@ -127,6 +129,7 @@ const struct iwl_cfg iwl8260_n_cfg = {
 	.ht_params = &iwl8000_ht_params,
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+	.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
 };

 MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
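
The new default_nvm_file hook gives family-8000 parts a built-in NVM file name. How it is consumed is outside this hunk; a plausible sketch follows (an assumption, not taken from the patch — load_nvm_from_file() is a hypothetical helper):

	/* Sketch: prefer an explicit nvm_file= module parameter, fall back
	 * to the per-device default introduced above. */
	const char *nvm_name = iwlwifi_mod_params.nvm_file ?:
			       trans->cfg->default_nvm_file;

	if (nvm_name)
		ret = load_nvm_from_file(trans, nvm_name);	/* hypothetical */
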
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7f37fb86837b..04a483d38659 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -102,9 +102,7 @@

 /* EEPROM */
 #define IWLAGN_EEPROM_IMG_SIZE		2048
-/* OTP */
-/* lower blocks contain EEPROM image and calibration data */
-#define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(u16)) /* 2 KB */
+
 /* high blocks contain PAPD data */
 #define OTP_HIGH_IMAGE_SIZE_6x00        (6 * 512 * sizeof(u16)) /* 6 KB */
 #define OTP_HIGH_IMAGE_SIZE_1000        (0x200 * sizeof(u16)) /* 1024 bytes */
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f17dc3f2c8a..b7047905f41a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
  * @wd_timeout: TX queues watchdog timeout
  * @max_event_log_size: size of event log buffer size for ucode event logging
  * @shadow_reg_enable: HW shadow register support
+ * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
+ *	is in flight. This is due to a HW bug in 7260, 3160 and 7265.
+ * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
  */
 struct iwl_base_params {
 	int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
 	u32 max_event_log_size;
 	const bool shadow_reg_enable;
 	const bool pcie_l1_allowed;
+	const bool apmg_wake_up_wa;
+	const bool scd_chain_ext_wa;
 };

 /*
@@ -188,6 +193,11 @@ struct iwl_ht_params {
 #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS	0x80
 #define EEPROM_REGULATORY_BAND_NO_HT40		0

+/* lower blocks contain EEPROM image and calibration data */
+#define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(u16)) /* 2 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_7000	(16 * 512 * sizeof(u16)) /* 16 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_8000	(32 * 512 * sizeof(u16)) /* 32 KB */
+
 struct iwl_eeprom_params {
 	const u8 regulatory_bands[7];
 	bool enhanced_txpower;
@@ -264,6 +274,8 @@ struct iwl_cfg {
 	u8   nvm_hw_section_num;
 	bool lp_xtal_workaround;
 	const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
+	bool no_power_up_nic_in_init;
+	const char *default_nvm_file;
 };

 /*
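
Both new iwl_base_params booleans are plain capability/workaround switches that lower layers test at runtime. A sketch of the consuming side (register and helper names are from iwl-prph.h/iwl-io.h, but treat the exact call sites as illustrative):

	/* Sketch: keep the scheduler's chain-extension feature off */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	/* Sketch: hold the MAC access request while a host command is in
	 * flight on 7260/3160/7265 (the apmg_wake_up_wa case) */
	if (trans->cfg->base_params->apmg_wake_up_wa)
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
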
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 8a44f594528d..09feff4fa226 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,8 +61,6 @@
  *
  *****************************************************************************/

-#define DEBUG
-
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/export.h>
@@ -128,8 +126,8 @@ void __iwl_dbg(struct device *dev,
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_have_debug_level(level) &&
 	    (!limit || net_ratelimit()))
-		dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
-			function, &vaf);
+		dev_printk(KERN_DEBUG, dev, "%c %s %pV",
+			   in_interrupt() ? 'I' : 'U', function, &vaf);
 #endif
 	trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
 	va_end(args);
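
Dropping "#define DEBUG" works because the print is switched from dev_dbg() to dev_printk(KERN_DEBUG, ...): dev_dbg() compiles away (or defers to dynamic debug) unless DEBUG is defined, while iwlwifi already gates on its own runtime check and wants an unconditional emit once that check passes. Compare (sketch):

	/* compiled out unless DEBUG/CONFIG_DYNAMIC_DEBUG enables it */
	dev_dbg(dev, "may never reach the log\n");

	/* emitted at KERN_DEBUG once iwl_have_debug_level() has passed */
	dev_printk(KERN_DEBUG, dev, "%c %s %pV",
		   in_interrupt() ? 'I' : 'U', function, &vaf);
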
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index c8cbdbe15924..295083510e72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,12 +47,32 @@ void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
 void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
 void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);

+/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
+#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
+
 /* No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
-#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
-#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
-#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
-#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
+#define IWL_ERR_DEV(d, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_err((d), false, false, f, ## a);		\
+	} while (0)
+#define IWL_ERR(m, f, a...)					\
+	IWL_ERR_DEV((m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_warn((m)->dev, f, ## a);			\
+	} while (0)
+#define IWL_INFO(m, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_info((m)->dev, f, ## a);			\
+	} while (0)
+#define IWL_CRIT(m, f, a...)					\
+	do {							\
+		CHECK_FOR_NEWLINE(f);				\
+		__iwl_crit((m)->dev, f, ## a);			\
+	} while (0)

 #if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
 void __iwl_dbg(struct device *dev,
@@ -72,12 +92,17 @@ do { \
 		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
 } while (0)

+#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...)	\
+	do {							\
+		CHECK_FOR_NEWLINE(fmt);				\
+		__iwl_dbg(dev, level, limit, __func__, fmt, ##args); \
+	} while (0)
 #define IWL_DEBUG(m, level, fmt, args...)			\
-	__iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
+	__IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
 #define IWL_DEBUG_DEV(dev, level, fmt, args...)			\
-	__iwl_dbg((dev), level, false, __func__, fmt, ##args)
+	__IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
 #define IWL_DEBUG_LIMIT(m, level, fmt, args...)			\
-	__iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
+	__IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)

 #ifdef CONFIG_IWLWIFI_DEBUG
 #define iwl_print_hex_dump(m, level, p, len)			\
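
The CHECK_FOR_NEWLINE trick explains the many format-string hunks elsewhere in this patch: for a string literal f, sizeof(f) - 2 indexes the last character before the terminating NUL, so BUILD_BUG_ON fires whenever a message lacks its trailing newline. Illustrative use (not from the patch):

	/* "bad state\n" has sizeof == 11; index 9 is '\n' -> builds fine */
	IWL_ERR(trans, "bad state\n");

	/* "bad state" has sizeof == 10; index 8 is 'e' -> BUILD_BUG_ON
	 * breaks the build, which is why this series appends \n to every
	 * IWL_ERR/IWL_DEBUG format string it touches. */
	/* IWL_ERR(trans, "bad state");    <- would no longer compile */
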
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 0a3e841b44a9..f2a5c12269a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1243,6 +1243,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
 	.bt_coex_active = true,
 	.power_level = IWL_POWER_INDEX_1,
 	.wd_disable = true,
+	.uapsd_disable = false,
 	/* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1356,6 +1357,10 @@ MODULE_PARM_DESC(wd_disable,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
 MODULE_PARM_DESC(nvm_file, "NVM file name");

+module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
+		   bool, S_IRUGO);
+MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
+
 /*
  * set bt_coex_active to true, uCode will do kill/defer
  * every time the priority line is asserted (BT is sending signals on the
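
Usage note: like the other entries in iwlwifi_mod_params, the new knob is a plain global that any iwlwifi module can test; booting with iwlwifi.uapsd_disable=1 turns U-APSD off driver-wide. A hypothetical consumer (the mac80211 field is real, the call site is invented for illustration):

	/* Sketch: advertise no U-APSD queues when the user disabled it */
	if (iwlwifi_mod_params.uapsd_disable)
		hw->uapsd_queues = 0;
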
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 58c8941c0d95..2953ffceda38 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -71,10 +71,15 @@
  * enum iwl_fw_error_dump_type - types of data in the dump file
  * @IWL_FW_ERROR_DUMP_SRAM:
  * @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
+ *	&struct iwl_fw_error_dump_txcmd packets
  */
 enum iwl_fw_error_dump_type {
 	IWL_FW_ERROR_DUMP_SRAM = 0,
 	IWL_FW_ERROR_DUMP_REG = 1,
+	IWL_FW_ERROR_DUMP_RXF = 2,
+	IWL_FW_ERROR_DUMP_TXCMD = 3,

 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -89,7 +94,7 @@ struct iwl_fw_error_dump_data {
 	__le32 type;
 	__le32 len;
 	__u8 data[];
-} __packed __aligned(4);
+} __packed;

 /**
  * struct iwl_fw_error_dump_file - the layout of the header of the file
@@ -101,6 +106,29 @@ struct iwl_fw_error_dump_file {
 	__le32 barker;
 	__le32 file_len;
 	u8 data[0];
-} __packed __aligned(4);
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_txcmd - TX command data
+ * @cmdlen: original length of command
+ * @caplen: captured length of command (may be less)
+ * @data: captured command data, @caplen bytes
+ */
+struct iwl_fw_error_dump_txcmd {
+	__le32 cmdlen;
+	__le32 caplen;
+	u8 data[];
+} __packed;
+
+/**
+ * iwl_mvm_fw_error_next_data - advance fw error dump data pointer
+ * @data: previous data block
+ * Returns: next data block
+ */
+static inline struct iwl_fw_error_dump_data *
+iwl_mvm_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+{
+	return (void *)(data->data + le32_to_cpu(data->len));
+}

 #endif /* __fw_error_dump_h__ */
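
The dump file is a barker + length header followed by type/len/data blocks, and iwl_mvm_fw_error_next_data() is the iterator that steps over them. A sketch of a reader (assuming file_len covers the whole buffer, as the field name suggests):

	static void walk_dump(void *buf)
	{
		struct iwl_fw_error_dump_file *file = buf;
		struct iwl_fw_error_dump_data *data = (void *)file->data;
		void *end = (u8 *)buf + le32_to_cpu(file->file_len);

		while ((void *)data < end) {
			if (le32_to_cpu(data->type) == IWL_FW_ERROR_DUMP_TXCMD) {
				struct iwl_fw_error_dump_txcmd *txcmd =
					(void *)data->data;
				/* txcmd->data carries caplen captured bytes
				 * of the last TX command (caplen <= cmdlen) */
				(void)txcmd;
			}
			data = iwl_mvm_fw_error_next_data(data);
		}
	}
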
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index d14f19339d61..0aa7c0085c9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -74,29 +74,24 @@
  * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
  * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
  * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
- * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
  * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
  *	offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
- * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
  * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
  *	(rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
  * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
  *	from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
- *	connection when going back to D0
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
- * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
- * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
- *	containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
  * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
  *	P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ *	P2P client interfaces simultaneously if they are in same bindings.
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
  * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
  * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
  */
 enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_PAN		= BIT(0),
@@ -104,22 +99,16 @@ enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_MFP		= BIT(2),
 	IWL_UCODE_TLV_FLAGS_P2P		= BIT(3),
 	IWL_UCODE_TLV_FLAGS_DW_BC_TABLE	= BIT(4),
-	IWL_UCODE_TLV_FLAGS_NEWBT_COEX	= BIT(5),
-	IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
 	IWL_UCODE_TLV_FLAGS_SHORT_BL	= BIT(7),
-	IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
-	IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
 	IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
-	IWL_UCODE_TLV_FLAGS_BF_UPDATED	= BIT(11),
 	IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
-	IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
 	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
 	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
-	IWL_UCODE_TLV_FLAGS_SCHED_SCAN		= BIT(17),
-	IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
-	IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+	IWL_UCODE_TLV_FLAGS_P2P_PM	= BIT(21),
 	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM	= BIT(22),
+	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM	= BIT(23),
 	IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT	= BIT(24),
+	IWL_UCODE_TLV_FLAGS_EBS_SUPPORT		= BIT(25),
 	IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD	= BIT(26),
 	IWL_UCODE_TLV_FLAGS_BCAST_FILTERING	= BIT(29),
 	IWL_UCODE_TLV_FLAGS_GO_UAPSD	= BIT(30),
@@ -128,9 +117,11 @@ enum iwl_ucode_tlv_flag {
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID	= BIT(0),
+	IWL_UCODE_TLV_API_CSA_FLOW		= BIT(4),
 };

 /**
@@ -183,6 +174,7 @@ enum iwl_ucode_sec {
 #define IWL_UCODE_SECTION_MAX 12
 #define IWL_API_ARRAY_SIZE	1
 #define IWL_CAPABILITIES_ARRAY_SIZE	1
+#define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC

 struct iwl_ucode_capabilities {
 	u32 max_probe_length;
@@ -205,6 +197,11 @@ struct fw_img {
 	bool is_dual_cpus;
 };

+struct iwl_sf_region {
+	u32 addr;
+	u32 size;
+};
+
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
 #define IWL_UCODE_MINOR(ver)	(((ver) & 0x00FF0000) >> 16)
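
The TLV flag churn above is a capability-bitmap cleanup: stale bits are retired (their BIT() positions stay reserved, which is why the enum keeps gaps) and new capabilities are announced by the firmware image rather than inferred. Consumers just test bits, for example (illustrative; ucode_capa is the driver's parsed-firmware state):

	bool uapsd_ok = fw->ucode_capa.flags &
			IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT;
	bool ebs_ok = fw->ucode_capa.flags &
		      IWL_UCODE_TLV_FLAGS_EBS_SUPPORT;	/* the new BIT(25) */
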
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 44cc3cf45762..5eef4ae7333b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -33,6 +33,7 @@
 #include "iwl-io.h"
 #include "iwl-csr.h"
 #include "iwl-debug.h"
+#include "iwl-prph.h"
 #include "iwl-fh.h"

 #define IWL_POLL_INTERVAL 10	/* microseconds */
@@ -183,6 +184,23 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 }
 IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);

+void iwl_force_nmi(struct iwl_trans *trans)
+{
+	/*
+	 * In HW previous to the 8000 HW family, and in the 8000 HW family
+	 * itself when the revision step==0, the DEVICE_SET_NMI_REG is used
+	 * to force an NMI. Otherwise, a different register -
+	 * DEVICE_SET_NMI_8000B_REG - is used.
+	 */
+	if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
+	    ((trans->hw_rev & 0xc) == 0x0))
+		iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL);
+	else
+		iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
+			       DEVICE_SET_NMI_8000B_VAL);
+}
+IWL_EXPORT_SYMBOL(iwl_force_nmi);
+
 static const char *get_fh_string(int cmd)
 {
 #define IWL_CMD(x) case x: return #x
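
iwl_force_nmi() centralises what callers previously had to open-code as raw DEVICE_SET_NMI_REG writes; the 8000B register pair it selects between is added to iwl-prph.h further down. A typical caller, sketched (the watchdog context is illustrative, not from this patch):

	/* Sketch: a stuck-queue watchdog asks the firmware to dump state by
	 * forcing an NMI; the helper picks the right register for the HW
	 * family and revision step. */
	IWL_ERR(trans, "Tx queue %d stuck, firing NMI\n", txq_id);
	iwl_force_nmi(trans);
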
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 665ddd9dbbc4..705d12c079e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -80,6 +80,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 			    u32 bits, u32 mask);
 void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_force_nmi(struct iwl_trans *trans);

 /* Error handling */
 int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d994317db85b..d051857729ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -119,6 +119,7 @@ struct iwl_mod_params {
 #endif
 	int ant_coupling;
 	char *nvm_file;
+	bool uapsd_disable;
 };

 #endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 6be30c698506..85eee79c495c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -62,6 +62,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/etherdevice.h>
 #include "iwl-drv.h"
 #include "iwl-modparams.h"
 #include "iwl-nvm-parse.h"
@@ -127,19 +128,20 @@ static const u8 iwl_nvm_channels[] = {

 static const u8 iwl_nvm_channels_family_8000[] = {
 	/* 2.4 GHz */
-	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
 	/* 5 GHz */
 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
 	149, 153, 157, 161, 165, 169, 173, 177, 181
 };

 #define IWL_NUM_CHANNELS	ARRAY_SIZE(iwl_nvm_channels)
 #define IWL_NUM_CHANNELS_FAMILY_8000	ARRAY_SIZE(iwl_nvm_channels_family_8000)
 #define NUM_2GHZ_CHANNELS	14
-#define FIRST_2GHZ_HT_MINUS	5
-#define LAST_2GHZ_HT_PLUS	9
-#define LAST_5GHZ_HT		161
+#define NUM_2GHZ_CHANNELS_FAMILY_8000	14
+#define FIRST_2GHZ_HT_MINUS		5
+#define LAST_2GHZ_HT_PLUS		9
+#define LAST_5GHZ_HT			161

 #define DEFAULT_MAX_TX_POWER 16

@@ -202,21 +204,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 	struct ieee80211_channel *channel;
 	u16 ch_flags;
 	bool is_5ghz;
-	int num_of_ch;
+	int num_of_ch, num_2ghz_channels;
 	const u8 *nvm_chan;

 	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
 		num_of_ch = IWL_NUM_CHANNELS;
 		nvm_chan = &iwl_nvm_channels[0];
+		num_2ghz_channels = NUM_2GHZ_CHANNELS;
 	} else {
 		num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
 		nvm_chan = &iwl_nvm_channels_family_8000[0];
+		num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
 	}

 	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
 		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);

-		if (ch_idx >= NUM_2GHZ_CHANNELS &&
+		if (ch_idx >= num_2ghz_channels &&
 		    !data->sku_cap_band_52GHz_enable)
 			ch_flags &= ~NVM_CHANNEL_VALID;

@@ -225,7 +229,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 				 "Ch. %d Flags %x [%sGHz] - No traffic\n",
 				 nvm_chan[ch_idx],
 				 ch_flags,
-				 (ch_idx >= NUM_2GHZ_CHANNELS) ?
+				 (ch_idx >= num_2ghz_channels) ?
 				 "5.2" : "2.4");
 			continue;
 		}
@@ -234,7 +238,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 		n_channels++;

 		channel->hw_value = nvm_chan[ch_idx];
-		channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+		channel->band = (ch_idx < num_2ghz_channels) ?
 				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 		channel->center_freq =
 			ieee80211_channel_to_frequency(
@@ -242,7 +246,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,

 		/* TODO: Need to be dependent to the NVM */
 		channel->flags = IEEE80211_CHAN_NO_HT40;
-		if (ch_idx < NUM_2GHZ_CHANNELS &&
+		if (ch_idx < num_2ghz_channels &&
 		    (ch_flags & NVM_CHANNEL_40MHZ)) {
 			if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +254,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
 		} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
 			   (ch_flags & NVM_CHANNEL_40MHZ)) {
-			if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+			if ((ch_idx - num_2ghz_channels) % 2 == 0)
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
 			else
 				channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
@@ -447,13 +451,7 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
 			       struct iwl_nvm_data *data,
 			       const __le16 *nvm_sec)
 {
-	u8 hw_addr[ETH_ALEN];
-
-	if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
-		memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
-	else
-		memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
-		       ETH_ALEN);
+	const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);

 	/* The byte order is little endian 16 bit, meaning 214365 */
 	data->hw_addr[0] = hw_addr[1];
@@ -464,6 +462,41 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
 	data->hw_addr[5] = hw_addr[4];
 }

+static void iwl_set_hw_address_family_8000(const struct iwl_cfg *cfg,
+					   struct iwl_nvm_data *data,
+					   const __le16 *mac_override,
+					   const __le16 *nvm_hw)
+{
+	const u8 *hw_addr;
+
+	if (mac_override) {
+		hw_addr = (const u8 *)(mac_override +
+				       MAC_ADDRESS_OVERRIDE_FAMILY_8000);
+
+		/* The byte order is little endian 16 bit, meaning 214365 */
+		data->hw_addr[0] = hw_addr[1];
+		data->hw_addr[1] = hw_addr[0];
+		data->hw_addr[2] = hw_addr[3];
+		data->hw_addr[3] = hw_addr[2];
+		data->hw_addr[4] = hw_addr[5];
+		data->hw_addr[5] = hw_addr[4];
+
+		if (is_valid_ether_addr(hw_addr))
+			return;
+	}
+
+	/* take the MAC address from the OTP */
+	hw_addr = (const u8 *)(nvm_hw + HW_ADDR0_FAMILY_8000);
+	data->hw_addr[0] = hw_addr[3];
+	data->hw_addr[1] = hw_addr[2];
+	data->hw_addr[2] = hw_addr[1];
+	data->hw_addr[3] = hw_addr[0];
+
+	hw_addr = (const u8 *)(nvm_hw + HW_ADDR1_FAMILY_8000);
+	data->hw_addr[4] = hw_addr[1];
+	data->hw_addr[5] = hw_addr[0];
+}
+
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 		   const __le16 *nvm_hw, const __le16 *nvm_sw,
@@ -523,7 +556,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 				rx_chains);
 	} else {
 		/* MAC address in family 8000 */
-		iwl_set_hw_address(cfg, data, mac_override);
+		iwl_set_hw_address_family_8000(cfg, data, mac_override, nvm_hw);

 		iwl_init_sbands(dev, cfg, data, regulatory,
 				sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ea29504ac617..99785c892f96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -63,6 +63,7 @@
 #ifndef __iwl_op_mode_h__
 #define __iwl_op_mode_h__

+#include <linux/netdevice.h>
 #include <linux/debugfs.h>

 struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
  * @stop: stop the op_mode. Must free all the memory allocated.
  *	May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- *	HCMD this Rx responds to.
- *	This callback may sleep, it is called from a threaded IRQ handler.
+ *	HCMD this Rx responds to. Can't sleep.
+ * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ *	but the higher layers need to know about it (in particular mac80211 to
+ *	to able to call the right NAPI RX functions); this function is needed
+ *	to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *	Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
 	void (*stop)(struct iwl_op_mode *op_mode);
 	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
 		  struct iwl_device_cmd *cmd);
+	void (*napi_add)(struct iwl_op_mode *op_mode,
+			 struct napi_struct *napi,
+			 struct net_device *napi_dev,
+			 int (*poll)(struct napi_struct *, int),
+			 int weight);
 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
 				 struct iwl_rx_cmd_buffer *rxb,
 				 struct iwl_device_cmd *cmd)
 {
-	might_sleep();
 	return op_mode->ops->rx(op_mode, rxb, cmd);
 }

@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
 	return op_mode->ops->exit_d0i3(op_mode);
 }

+static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
+					struct napi_struct *napi,
+					struct net_device *napi_dev,
+					int (*poll)(struct napi_struct *, int),
+					int weight)
+{
+	if (!op_mode->ops->napi_add)
+		return;
+	op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
+}
+
 #endif /* __iwl_op_mode_h__ */
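
The napi_add hook inverts the usual ownership: the transport runs NAPI, but the layers above the op_mode must register it. Sketch of both ends (the iwl_pcie_* names are illustrative, not taken from this patch):

	/* transport-side poll routine (illustrative) */
	static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
	{
		/* drain the RX ring, hand frames up via the op_mode */
		return 0;	/* all work done */
	}

	/* in the transport's init path; a no-op when the op_mode left
	 * .napi_add NULL, per the inline wrapper above */
	iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
			     trans_pcie->napi_dev, iwl_pcie_napi_poll, 64);
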
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index b761ac4822a3..d4fb5cad07ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -345,7 +345,6 @@ static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
 	struct iwl_phy_db_cmd phy_db_cmd;
 	struct iwl_host_cmd cmd = {
 		.id = PHY_DB_CMD,
-		.flags = CMD_SYNC,
 	};

 	IWL_DEBUG_INFO(phy_db->trans,
@@ -393,13 +392,13 @@ static int iwl_phy_db_send_all_channel_groups(
 					      entry->data);
 		if (err) {
 			IWL_ERR(phy_db->trans,
-				"Can't SEND phy_db section %d (%d), err %d",
+				"Can't SEND phy_db section %d (%d), err %d\n",
 				type, i, err);
 			return err;
 		}

 		IWL_DEBUG_INFO(phy_db->trans,
-			       "Sent PHY_DB HCMD, type = %d num = %d",
+			       "Sent PHY_DB HCMD, type = %d num = %d\n",
 			       type, i);
 	}

@@ -451,7 +450,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
 					 IWL_NUM_PAPD_CH_GROUPS);
 	if (err) {
 		IWL_ERR(phy_db->trans,
-			"Cannot send channel specific PAPD groups");
+			"Cannot send channel specific PAPD groups\n");
 		return err;
 	}

@@ -461,7 +460,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
 					 IWL_NUM_TXP_CH_GROUPS);
 	if (err) {
 		IWL_ERR(phy_db->trans,
-			"Cannot send channel specific TX power groups");
+			"Cannot send channel specific TX power groups\n");
 		return err;
 	}

diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5f657c501406..4997e27672b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -105,6 +105,9 @@

 /* Device NMI register */
 #define DEVICE_SET_NMI_REG		0x00a01c30
+#define DEVICE_SET_NMI_VAL		0x1
+#define DEVICE_SET_NMI_8000B_REG	0x00a01c24
+#define DEVICE_SET_NMI_8000B_VAL	0x1000000

 /* Shared registers (0x0..0x3ff, via target indirect or periphery */
 #define SHR_BASE			0x00a10000
@@ -348,4 +351,12 @@ enum secure_load_status_reg {

 #define LMPM_SECURE_TIME_OUT	(100)

+/* Rx FIFO */
+#define RXF_SIZE_ADDR			(0xa00c88)
+#define RXF_SIZE_BYTE_CND_POS		(7)
+#define RXF_SIZE_BYTE_CNT_MSK		(0x3ff << RXF_SIZE_BYTE_CND_POS)
+
+#define RXF_LD_FENCE_OFFSET_ADDR	(0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR		(0xa00c0c)
+
 #endif /* __iwl_prph_h__ */
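
The RXF_SIZE mask/position pair decodes the RX FIFO fill level out of a raw register read: 0x3ff is a 10-bit field occupying bits 16:7. A self-contained demo of the bit arithmetic (the register value is made up):

	#include <stdint.h>
	#include <stdio.h>

	#define RXF_SIZE_BYTE_CND_POS	7
	#define RXF_SIZE_BYTE_CNT_MSK	(0x3ff << RXF_SIZE_BYTE_CND_POS)

	int main(void)
	{
		uint32_t reg = 0x0001a480;	/* hypothetical raw read */
		uint32_t cnt = (reg & RXF_SIZE_BYTE_CNT_MSK) >>
			       RXF_SIZE_BYTE_CND_POS;

		printf("rx fifo count field = %u\n", cnt);	/* 841 */
		return 0;
	}
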
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8cdb0dd618a6..34d49e171fb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -189,10 +189,9 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
 /**
  * enum CMD_MODE - how to send the host commands ?
  *
- * @CMD_SYNC: The caller will be stalled until the fw responds to the command
  * @CMD_ASYNC: Return right away and don't wait for the response
- * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *	response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
+ *	the response. The caller needs to call iwl_free_resp when done.
  * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
  *	command queue, but after other high priority commands. valid only
  *	with CMD_ASYNC.
@@ -202,7 +201,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  *	(i.e. mark it as non-idle).
  */
 enum CMD_MODE {
-	CMD_SYNC		= 0,
 	CMD_ASYNC		= BIT(0),
 	CMD_WANT_SKB		= BIT(1),
 	CMD_SEND_IN_RFKILL	= BIT(2),
@@ -427,7 +425,7 @@ struct iwl_trans;
 * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
-*	May sleep only if CMD_SYNC is set
+*	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb
 *	Must be atomic
 * @reclaim: free packet until ssn. Returns a list of freed packets.
@@ -437,8 +435,7 @@ struct iwl_trans;
 *	this one. The op_mode must not configure the HCMD queue. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
-* @wait_tx_queue_empty: wait until all tx queues are empty
-*	May sleep
+* @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
 * @dbgfs_register: add the dbgfs files under this directory. Files will be
 *	automatically deleted.
 * @write8: write a u8 to a register at offset ofs from the BAR
@@ -464,6 +461,11 @@ struct iwl_trans;
 * @unref: release a reference previously taken with @ref. Note that
 *	initially the reference count is 1, making an initial @unref
 *	necessary to allow low power states.
+* @dump_data: fill a data dump with debug data, maybe containing last
+*	TX'ed commands and similar. When called with a NULL buffer and
+*	zero buffer length, provide only the (estimated) required buffer
+*	length. Return the used buffer length.
+*	Note that the transport must fill in the proper file headers.
 */
struct iwl_trans_ops {

@@ -471,6 +473,8 @@ struct iwl_trans_ops {
 	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
 			bool run_in_rfkill);
+	int (*update_sf)(struct iwl_trans *trans,
+			 struct iwl_sf_region *st_fwrd_space);
 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
 	void (*stop_device)(struct iwl_trans *trans);

@@ -490,7 +494,7 @@ struct iwl_trans_ops {
 	void (*txq_disable)(struct iwl_trans *trans, int queue);

 	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
-	int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);

 	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
 	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -512,6 +516,10 @@ struct iwl_trans_ops {
 			      u32 value);
 	void (*ref)(struct iwl_trans *trans);
 	void (*unref)(struct iwl_trans *trans);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	u32 (*dump_data)(struct iwl_trans *trans, void *buf, u32 buflen);
+#endif
 };

 /**
@@ -630,6 +638,17 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
 	return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }

+static inline int iwl_trans_update_sf(struct iwl_trans *trans,
+				      struct iwl_sf_region *st_fwrd_space)
+{
+	might_sleep();
+
+	if (trans->ops->update_sf)
+		return trans->ops->update_sf(trans, st_fwrd_space);
+
+	return 0;
+}
+
 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 {
 	might_sleep();
@@ -665,6 +684,16 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
 	trans->ops->unref(trans);
 }

+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline u32 iwl_trans_dump_data(struct iwl_trans *trans,
+				      void *buf, u32 buflen)
+{
+	if (!trans->ops->dump_data)
+		return 0;
+	return trans->ops->dump_data(trans, buf, buflen);
+}
+#endif
+
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 				     struct iwl_host_cmd *cmd)
 {
@@ -678,7 +707,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 		return -EIO;

 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 		return -EIO;
 	}

@@ -720,7 +749,7 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		return -EIO;

 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

 	return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
@@ -729,7 +758,7 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 				     int ssn, struct sk_buff_head *skbs)
 {
 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

 	trans->ops->reclaim(trans, queue, ssn, skbs);
 }
@@ -746,7 +775,7 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
746 might_sleep(); 775 might_sleep();
747 776
748 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE))) 777 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
749 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state); 778 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
750 779
751 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid, 780 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
752 frame_limit, ssn); 781 frame_limit, ssn);
@@ -759,12 +788,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
759 IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0); 788 IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
760} 789}
761 790
762static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) 791static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
792 u32 txq_bm)
763{ 793{
764 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) 794 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
765 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state); 795 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
766 796
767 return trans->ops->wait_tx_queue_empty(trans); 797 return trans->ops->wait_tx_queue_empty(trans, txq_bm);
768} 798}
769 799
770static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, 800static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
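
The new dump_data hook and its iwl_trans_dump_data() wrapper above follow a two-pass sizing contract: a first call with a NULL buffer and zero length returns the (estimated) required size, and a second call fills the buffer and returns the number of bytes actually used. A minimal caller sketch of that contract, assuming a vmalloc'ed destination buffer (the function name is hypothetical; the real consumer is the mvm firmware error dump path):

#ifdef CONFIG_IWLWIFI_DEBUGFS
#include <linux/vmalloc.h>

/* Hypothetical caller: first pass queries the size, second pass fills. */
static void *example_grab_trans_dump(struct iwl_trans *trans, u32 *used)
{
	u32 needed = iwl_trans_dump_data(trans, NULL, 0);	/* size pass */
	void *buf;

	if (!needed)
		return NULL;

	buf = vzalloc(needed);
	if (!buf)
		return NULL;

	*used = iwl_trans_dump_data(trans, buf, needed);	/* fill pass */
	return buf;
}
#endif
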
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index ccdd3b7c4cce..c30d7f64ec1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -3,8 +3,9 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o 3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
4iwlmvm-y += scan.o time-event.o rs.o 4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o coex.o 5iwlmvm-y += power.o coex.o
6iwlmvm-y += led.o tt.o offloading.o 6iwlmvm-y += tt.o offloading.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o 7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
8iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o 9iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
9 10
10ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ 11ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 0489314425cb..c8c3b38228f0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -104,12 +104,9 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
104#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65) 104#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
105#define BT_ANTENNA_COUPLING_THRESHOLD (30) 105#define BT_ANTENNA_COUPLING_THRESHOLD (30)
106 106
107int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) 107static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
108{ 108{
109 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX)) 109 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
110 return 0;
111
112 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
113 sizeof(struct iwl_bt_coex_prio_tbl_cmd), 110 sizeof(struct iwl_bt_coex_prio_tbl_cmd),
114 &iwl_bt_prio_tbl); 111 &iwl_bt_prio_tbl);
115} 112}
@@ -127,10 +124,10 @@ const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
127}; 124};
128 125
129static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = { 126static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
130 cpu_to_le32(0xf0f0f0f0), 127 cpu_to_le32(0xf0f0f0f0), /* 50% */
131 cpu_to_le32(0xc0c0c0c0), 128 cpu_to_le32(0xc0c0c0c0), /* 25% */
132 cpu_to_le32(0xfcfcfcfc), 129 cpu_to_le32(0xfcfcfcfc), /* 75% */
133 cpu_to_le32(0xff00ff00), 130 cpu_to_le32(0xfefefefe), /* 87.5% */
134}; 131};
135 132
136static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { 133static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
@@ -303,8 +300,8 @@ static const __le64 iwl_ci_mask[][3] = {
303}; 300};
304 301
305static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { 302static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
306 cpu_to_le32(0x22002200), 303 cpu_to_le32(0x28412201),
307 cpu_to_le32(0x33113311), 304 cpu_to_le32(0x11118451),
308}; 305};
309 306
310struct corunning_block_luts { 307struct corunning_block_luts {
@@ -568,13 +565,13 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
568 .id = BT_CONFIG, 565 .id = BT_CONFIG,
569 .len = { sizeof(*bt_cmd), }, 566 .len = { sizeof(*bt_cmd), },
570 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 567 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
571 .flags = CMD_SYNC,
572 }; 568 };
573 int ret; 569 int ret;
574 u32 flags; 570 u32 flags;
575 571
576 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX)) 572 ret = iwl_send_bt_prio_tbl(mvm);
577 return 0; 573 if (ret)
574 return ret;
578 575
579 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); 576 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
580 if (!bt_cmd) 577 if (!bt_cmd)
@@ -582,10 +579,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
582 cmd.data[0] = bt_cmd; 579 cmd.data[0] = bt_cmd;
583 580
584 bt_cmd->max_kill = 5; 581 bt_cmd->max_kill = 5;
585 bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD, 582 bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
586 bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling, 583 bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
587 bt_cmd->bt4_tx_tx_delta_freq_thr = 15, 584 bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
588 bt_cmd->bt4_tx_rx_max_freq0 = 15, 585 bt_cmd->bt4_tx_rx_max_freq0 = 15;
586 bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
587 bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
589 588
590 flags = iwlwifi_mod_params.bt_coex_active ? 589 flags = iwlwifi_mod_params.bt_coex_active ?
591 BT_COEX_NW : BT_COEX_DISABLE; 590 BT_COEX_NW : BT_COEX_DISABLE;
@@ -663,7 +662,6 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
663 .data[0] = &bt_cmd, 662 .data[0] = &bt_cmd,
664 .len = { sizeof(*bt_cmd), }, 663 .len = { sizeof(*bt_cmd), },
665 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 664 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
666 .flags = CMD_SYNC,
667 }; 665 };
668 int ret = 0; 666 int ret = 0;
669 667
@@ -717,7 +715,8 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
717 return ret; 715 return ret;
718} 716}
719 717
720int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable) 718static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
719 bool enable)
721{ 720{
722 struct iwl_bt_coex_cmd *bt_cmd; 721 struct iwl_bt_coex_cmd *bt_cmd;
723 /* Send ASYNC since this can be sent from an atomic context */ 722 /* Send ASYNC since this can be sent from an atomic context */
@@ -735,8 +734,7 @@ int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
735 return 0; 734 return 0;
736 735
737 /* nothing to do */ 736 /* nothing to do */
738 if (mvmsta->bt_reduced_txpower_dbg || 737 if (mvmsta->bt_reduced_txpower == enable)
739 mvmsta->bt_reduced_txpower == enable)
740 return 0; 738 return 0;
741 739
742 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC); 740 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -803,23 +801,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
803 801
804 switch (vif->type) { 802 switch (vif->type) {
805 case NL80211_IFTYPE_STATION: 803 case NL80211_IFTYPE_STATION:
 804 /* Count BSS vifs */
805 data->num_bss_ifaces++;
806 /* default smps_mode for BSS / P2P client is AUTOMATIC */ 806 /* default smps_mode for BSS / P2P client is AUTOMATIC */
807 smps_mode = IEEE80211_SMPS_AUTOMATIC; 807 smps_mode = IEEE80211_SMPS_AUTOMATIC;
808 data->num_bss_ifaces++;
809
810 /*
 811 * Count unassoc BSSes, relax SMPS constraints
812 * and disable reduced Tx Power
813 */
814 if (!vif->bss_conf.assoc) {
815 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
816 smps_mode);
817 if (iwl_mvm_bt_coex_reduced_txp(mvm,
818 mvmvif->ap_sta_id,
819 false))
820 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
821 return;
822 }
823 break; 808 break;
824 case NL80211_IFTYPE_AP: 809 case NL80211_IFTYPE_AP:
825 /* default smps_mode for AP / GO is OFF */ 810 /* default smps_mode for AP / GO is OFF */
@@ -845,8 +830,12 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
845 /* ... relax constraints and disable rssi events */ 830 /* ... relax constraints and disable rssi events */
846 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, 831 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
847 smps_mode); 832 smps_mode);
848 if (vif->type == NL80211_IFTYPE_STATION) 833 data->reduced_tx_power = false;
834 if (vif->type == NL80211_IFTYPE_STATION) {
835 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
836 false);
849 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); 837 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
838 }
850 return; 839 return;
851 } 840 }
852 841
@@ -857,6 +846,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
857 smps_mode = vif->type == NL80211_IFTYPE_AP ? 846 smps_mode = vif->type == NL80211_IFTYPE_AP ?
858 IEEE80211_SMPS_OFF : 847 IEEE80211_SMPS_OFF :
859 IEEE80211_SMPS_DYNAMIC; 848 IEEE80211_SMPS_DYNAMIC;
849
 850 /* relax SMPS constraints for next association */
851 if (!vif->bss_conf.assoc)
852 smps_mode = IEEE80211_SMPS_AUTOMATIC;
853
860 IWL_DEBUG_COEX(data->mvm, 854 IWL_DEBUG_COEX(data->mvm,
861 "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n", 855 "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
862 mvmvif->id, data->notif->bt_status, bt_activity_grading, 856 mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -903,22 +897,18 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
903 /* if secondary is not NULL, it might be a GO */ 897 /* if secondary is not NULL, it might be a GO */
904 data->secondary = chanctx_conf; 898 data->secondary = chanctx_conf;
905 899
906 /* don't reduce the Tx power if in loose scheme */ 900 /*
901 * don't reduce the Tx power if one of these is true:
902 * we are in LOOSE
903 * single share antenna product
 904 * BT is not active
 905 * we are not associated
906 */
907 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || 907 if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
908 mvm->cfg->bt_shared_single_ant) { 908 mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
909 data->reduced_tx_power = false; 909 !data->notif->bt_status) {
910 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
911 return;
912 }
913
914 /* reduced Txpower only if BT is on, so ...*/
915 if (!data->notif->bt_status) {
916 /* ... cancel reduced Tx power ... */
917 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
918 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
919 data->reduced_tx_power = false; 910 data->reduced_tx_power = false;
920 911 iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
921 /* ... and there is no need to get reports on RSSI any more. */
922 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); 912 iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
923 return; 913 return;
924 } 914 }
@@ -1022,9 +1012,9 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
1022 1012
1023 /* Don't spam the fw with the same command over and over */ 1013 /* Don't spam the fw with the same command over and over */
1024 if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) { 1014 if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
1025 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC, 1015 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
1026 sizeof(cmd), &cmd)) 1016 sizeof(cmd), &cmd))
1027 IWL_ERR(mvm, "Failed to send BT_CI cmd"); 1017 IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
1028 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd)); 1018 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
1029 } 1019 }
1030 1020
@@ -1039,7 +1029,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
1039 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); 1029 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
1040} 1030}
1041 1031
1042/* upon association, the fw will send in BT Coex notification */
1043int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, 1032int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
1044 struct iwl_rx_cmd_buffer *rxb, 1033 struct iwl_rx_cmd_buffer *rxb,
1045 struct iwl_device_cmd *dev_cmd) 1034 struct iwl_device_cmd *dev_cmd)
@@ -1215,6 +1204,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
1215 return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT; 1204 return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
1216} 1205}
1217 1206
1207bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
1208 enum ieee80211_band band)
1209{
1210 u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
1211
1212 if (band != IEEE80211_BAND_2GHZ)
1213 return false;
1214
1215 return bt_activity >= BT_LOW_TRAFFIC;
1216}
1217
1218u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, 1218u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1219 struct ieee80211_tx_info *info, u8 ac) 1219 struct ieee80211_tx_info *info, u8 ac)
1220{ 1220{
@@ -1249,9 +1249,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1249 1249
1250void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) 1250void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
1251{ 1251{
1252 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
1253 return;
1254
1255 iwl_mvm_bt_coex_notif_handle(mvm); 1252 iwl_mvm_bt_coex_notif_handle(mvm);
1256} 1253}
1257 1254
@@ -1270,7 +1267,6 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1270 .id = BT_CONFIG, 1267 .id = BT_CONFIG,
1271 .len = { sizeof(*bt_cmd), }, 1268 .len = { sizeof(*bt_cmd), },
1272 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 1269 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1273 .flags = CMD_SYNC,
1274 }; 1270 };
1275 1271
1276 if (!IWL_MVM_BT_COEX_CORUNNING) 1272 if (!IWL_MVM_BT_COEX_CORUNNING)
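
The new iwl_mvm_bt_coex_is_tpc_allowed() helper gates BT-aware Tx power control on two conditions: the channel is on 2.4 GHz and BT reports at least low traffic. A sketch of how a rate-scaling caller might consult it (the caller name is hypothetical; the real consumer feeds the new reduced_tpc field of struct iwl_lq_cmd):

/* Hypothetical caller: only attempt reduced Tx power when the coex
 * helper says BT activity on 2.4 GHz could actually benefit from it. */
static bool example_may_reduce_tpc(struct iwl_mvm *mvm,
				   enum ieee80211_band band)
{
	if (!iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
		return false;

	/* further per-station checks (e.g. RSSI thresholds) go here */
	return true;
}
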
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index e56f5a0edf85..645b3cfc29a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -193,8 +193,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
193 wkc.wep_key.key_offset = data->wep_key_idx; 193 wkc.wep_key.key_offset = data->wep_key_idx;
194 } 194 }
195 195
196 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC, 196 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
197 sizeof(wkc), &wkc);
198 data->error = ret != 0; 197 data->error = ret != 0;
199 198
200 mvm->ptk_ivlen = key->iv_len; 199 mvm->ptk_ivlen = key->iv_len;
@@ -341,7 +340,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
341 struct iwl_host_cmd cmd = { 340 struct iwl_host_cmd cmd = {
342 .id = WOWLAN_PATTERNS, 341 .id = WOWLAN_PATTERNS,
343 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 342 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
344 .flags = CMD_SYNC,
345 }; 343 };
346 int i, err; 344 int i, err;
347 345
@@ -518,7 +516,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
518 .id = REMOTE_WAKE_CONFIG_CMD, 516 .id = REMOTE_WAKE_CONFIG_CMD,
519 .len = { sizeof(*cfg), }, 517 .len = { sizeof(*cfg), },
520 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 518 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
521 .flags = CMD_SYNC,
522 }; 519 };
523 int ret; 520 int ret;
524 521
@@ -666,10 +663,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
666 663
667 if (WARN_ON(!vif->bss_conf.assoc)) 664 if (WARN_ON(!vif->bss_conf.assoc))
668 return -EINVAL; 665 return -EINVAL;
669 /* hack */ 666
670 vif->bss_conf.assoc = false;
671 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 667 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
672 vif->bss_conf.assoc = true;
673 if (ret) 668 if (ret)
674 return ret; 669 return ret;
675 670
@@ -705,7 +700,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
705 return ret; 700 return ret;
706 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); 701 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
707 702
708 ret = iwl_mvm_mac_ctxt_changed(mvm, vif); 703 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
709 if (ret) 704 if (ret)
710 return ret; 705 return ret;
711 706
@@ -719,7 +714,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
719 for (i = 1; i < MAX_BINDINGS; i++) 714 for (i = 1; i < MAX_BINDINGS; i++)
720 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); 715 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
721 716
722 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, 717 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
723 sizeof(quota_cmd), &quota_cmd); 718 sizeof(quota_cmd), &quota_cmd);
724 if (ret) 719 if (ret)
725 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 720 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
@@ -739,15 +734,13 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
739 }; 734 };
740 struct iwl_host_cmd cmd = { 735 struct iwl_host_cmd cmd = {
741 .id = NON_QOS_TX_COUNTER_CMD, 736 .id = NON_QOS_TX_COUNTER_CMD,
742 .flags = CMD_SYNC | CMD_WANT_SKB, 737 .flags = CMD_WANT_SKB,
743 }; 738 };
744 int err; 739 int err;
745 u32 size; 740 u32 size;
746 741
747 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) { 742 cmd.data[0] = &query_cmd;
748 cmd.data[0] = &query_cmd; 743 cmd.len[0] = sizeof(query_cmd);
749 cmd.len[0] = sizeof(query_cmd);
750 }
751 744
752 err = iwl_mvm_send_cmd(mvm, &cmd); 745 err = iwl_mvm_send_cmd(mvm, &cmd);
753 if (err) 746 if (err)
@@ -758,10 +751,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
758 err = -EINVAL; 751 err = -EINVAL;
759 } else { 752 } else {
760 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); 753 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
761 /* new API returns next, not last-used seqno */ 754 /* firmware returns next, not last-used seqno */
762 if (mvm->fw->ucode_capa.flags & 755 err = (u16) (err - 0x10);
763 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
764 err = (u16) (err - 0x10);
765 } 756 }
766 757
767 iwl_free_resp(&cmd); 758 iwl_free_resp(&cmd);
@@ -785,11 +776,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
785 776
786 mvmvif->seqno_valid = false; 777 mvmvif->seqno_valid = false;
787 778
788 if (!(mvm->fw->ucode_capa.flags & 779 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
789 IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
790 return;
791
792 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
793 sizeof(query_cmd), &query_cmd)) 780 sizeof(query_cmd), &query_cmd))
794 IWL_ERR(mvm, "failed to set non-QoS seqno\n"); 781 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
795} 782}
@@ -804,7 +791,7 @@ iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
804 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID) 791 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
805 cmd_len = sizeof(*cmd); 792 cmd_len = sizeof(*cmd);
806 793
807 return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC, 794 return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
808 cmd_len, cmd); 795 cmd_len, cmd);
809} 796}
810 797
@@ -833,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
833 }; 820 };
834 struct iwl_host_cmd d3_cfg_cmd = { 821 struct iwl_host_cmd d3_cfg_cmd = {
835 .id = D3_CONFIG_CMD, 822 .id = D3_CONFIG_CMD,
836 .flags = CMD_SYNC | CMD_WANT_SKB, 823 .flags = CMD_WANT_SKB,
837 .data[0] = &d3_cfg_cmd_data, 824 .data[0] = &d3_cfg_cmd_data,
838 .len[0] = sizeof(d3_cfg_cmd_data), 825 .len[0] = sizeof(d3_cfg_cmd_data),
839 }; 826 };
@@ -983,7 +970,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
983 if (key_data.use_rsc_tsc) { 970 if (key_data.use_rsc_tsc) {
984 struct iwl_host_cmd rsc_tsc_cmd = { 971 struct iwl_host_cmd rsc_tsc_cmd = {
985 .id = WOWLAN_TSC_RSC_PARAM, 972 .id = WOWLAN_TSC_RSC_PARAM,
986 .flags = CMD_SYNC,
987 .data[0] = key_data.rsc_tsc, 973 .data[0] = key_data.rsc_tsc,
988 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 974 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
989 .len[0] = sizeof(*key_data.rsc_tsc), 975 .len[0] = sizeof(*key_data.rsc_tsc),
@@ -997,7 +983,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
997 if (key_data.use_tkip) { 983 if (key_data.use_tkip) {
998 ret = iwl_mvm_send_cmd_pdu(mvm, 984 ret = iwl_mvm_send_cmd_pdu(mvm,
999 WOWLAN_TKIP_PARAM, 985 WOWLAN_TKIP_PARAM,
1000 CMD_SYNC, sizeof(tkip_cmd), 986 0, sizeof(tkip_cmd),
1001 &tkip_cmd); 987 &tkip_cmd);
1002 if (ret) 988 if (ret)
1003 goto out; 989 goto out;
@@ -1014,8 +1000,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1014 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; 1000 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
1015 1001
1016 ret = iwl_mvm_send_cmd_pdu(mvm, 1002 ret = iwl_mvm_send_cmd_pdu(mvm,
1017 WOWLAN_KEK_KCK_MATERIAL, 1003 WOWLAN_KEK_KCK_MATERIAL, 0,
1018 CMD_SYNC,
1019 sizeof(kek_kck_cmd), 1004 sizeof(kek_kck_cmd),
1020 &kek_kck_cmd); 1005 &kek_kck_cmd);
1021 if (ret) 1006 if (ret)
@@ -1031,7 +1016,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1031 if (ret) 1016 if (ret)
1032 goto out; 1017 goto out;
1033 1018
1034 ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC); 1019 ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0);
1035 if (ret) 1020 if (ret)
1036 goto out; 1021 goto out;
1037 1022
@@ -1043,7 +1028,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1043 if (ret) 1028 if (ret)
1044 goto out; 1029 goto out;
1045 1030
1046 ret = iwl_mvm_power_update_mac(mvm, vif); 1031 ret = iwl_mvm_power_update_mac(mvm);
1047 if (ret) 1032 if (ret)
1048 goto out; 1033 goto out;
1049 1034
@@ -1082,6 +1067,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1082 1067
1083int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 1068int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1084{ 1069{
1070 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1071
1072 if (iwl_mvm_is_d0i3_supported(mvm)) {
1073 mutex_lock(&mvm->d0i3_suspend_mutex);
1074 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1075 mutex_unlock(&mvm->d0i3_suspend_mutex);
1076 return 0;
1077 }
1078
1085 return __iwl_mvm_suspend(hw, wowlan, false); 1079 return __iwl_mvm_suspend(hw, wowlan, false);
1086} 1080}
1087 1081
@@ -1277,7 +1271,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1277} 1271}
1278 1272
1279static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, 1273static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
1280 struct iwl_wowlan_status_v6 *status) 1274 struct iwl_wowlan_status *status)
1281{ 1275{
1282 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc; 1276 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1283 1277
@@ -1294,7 +1288,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
1294} 1288}
1295 1289
1296struct iwl_mvm_d3_gtk_iter_data { 1290struct iwl_mvm_d3_gtk_iter_data {
1297 struct iwl_wowlan_status_v6 *status; 1291 struct iwl_wowlan_status *status;
1298 void *last_gtk; 1292 void *last_gtk;
1299 u32 cipher; 1293 u32 cipher;
1300 bool find_phase, unhandled_cipher; 1294 bool find_phase, unhandled_cipher;
@@ -1370,7 +1364,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
1370 1364
1371static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, 1365static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1372 struct ieee80211_vif *vif, 1366 struct ieee80211_vif *vif,
1373 struct iwl_wowlan_status_v6 *status) 1367 struct iwl_wowlan_status *status)
1374{ 1368{
1375 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1369 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1376 struct iwl_mvm_d3_gtk_iter_data gtkdata = { 1370 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1465,10 +1459,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1465 } err_info; 1459 } err_info;
1466 struct iwl_host_cmd cmd = { 1460 struct iwl_host_cmd cmd = {
1467 .id = WOWLAN_GET_STATUSES, 1461 .id = WOWLAN_GET_STATUSES,
1468 .flags = CMD_SYNC | CMD_WANT_SKB, 1462 .flags = CMD_WANT_SKB,
1469 }; 1463 };
1470 struct iwl_wowlan_status_data status; 1464 struct iwl_wowlan_status_data status;
1471 struct iwl_wowlan_status_v6 *status_v6; 1465 struct iwl_wowlan_status *fw_status;
1472 int ret, len, status_size, i; 1466 int ret, len, status_size, i;
1473 bool keep; 1467 bool keep;
1474 struct ieee80211_sta *ap_sta; 1468 struct ieee80211_sta *ap_sta;
@@ -1491,7 +1485,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1491 } 1485 }
1492 1486
1493 /* only for tracing for now */ 1487 /* only for tracing for now */
1494 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL); 1488 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1495 if (ret) 1489 if (ret)
1496 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); 1490 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1497 1491
@@ -1505,10 +1499,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1505 if (!cmd.resp_pkt) 1499 if (!cmd.resp_pkt)
1506 goto out_unlock; 1500 goto out_unlock;
1507 1501
1508 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) 1502 status_size = sizeof(*fw_status);
1509 status_size = sizeof(struct iwl_wowlan_status_v6);
1510 else
1511 status_size = sizeof(struct iwl_wowlan_status_v4);
1512 1503
1513 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1504 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1514 if (len < status_size) { 1505 if (len < status_size) {
@@ -1516,35 +1507,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1516 goto out_free_resp; 1507 goto out_free_resp;
1517 } 1508 }
1518 1509
1519 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) { 1510 fw_status = (void *)cmd.resp_pkt->data;
1520 status_v6 = (void *)cmd.resp_pkt->data; 1511
1521 1512 status.pattern_number = le16_to_cpu(fw_status->pattern_number);
1522 status.pattern_number = le16_to_cpu(status_v6->pattern_number); 1513 for (i = 0; i < 8; i++)
1523 for (i = 0; i < 8; i++) 1514 status.qos_seq_ctr[i] =
1524 status.qos_seq_ctr[i] = 1515 le16_to_cpu(fw_status->qos_seq_ctr[i]);
1525 le16_to_cpu(status_v6->qos_seq_ctr[i]); 1516 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
1526 status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons); 1517 status.wake_packet_length =
1527 status.wake_packet_length = 1518 le32_to_cpu(fw_status->wake_packet_length);
1528 le32_to_cpu(status_v6->wake_packet_length); 1519 status.wake_packet_bufsize =
1529 status.wake_packet_bufsize = 1520 le32_to_cpu(fw_status->wake_packet_bufsize);
1530 le32_to_cpu(status_v6->wake_packet_bufsize); 1521 status.wake_packet = fw_status->wake_packet;
1531 status.wake_packet = status_v6->wake_packet;
1532 } else {
1533 struct iwl_wowlan_status_v4 *status_v4;
1534 status_v6 = NULL;
1535 status_v4 = (void *)cmd.resp_pkt->data;
1536
1537 status.pattern_number = le16_to_cpu(status_v4->pattern_number);
1538 for (i = 0; i < 8; i++)
1539 status.qos_seq_ctr[i] =
1540 le16_to_cpu(status_v4->qos_seq_ctr[i]);
1541 status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
1542 status.wake_packet_length =
1543 le32_to_cpu(status_v4->wake_packet_length);
1544 status.wake_packet_bufsize =
1545 le32_to_cpu(status_v4->wake_packet_bufsize);
1546 status.wake_packet = status_v4->wake_packet;
1547 }
1548 1522
1549 if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) { 1523 if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
1550 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1524 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1571,7 +1545,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1571 1545
1572 iwl_mvm_report_wakeup_reasons(mvm, vif, &status); 1546 iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1573 1547
1574 keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6); 1548 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
1575 1549
1576 iwl_free_resp(&cmd); 1550 iwl_free_resp(&cmd);
1577 return keep; 1551 return keep;
@@ -1674,6 +1648,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
1674{ 1648{
1675 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1649 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1676 1650
1651 if (iwl_mvm_is_d0i3_supported(mvm)) {
1652 bool exit_now;
1653
1654 mutex_lock(&mvm->d0i3_suspend_mutex);
1655 __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1656 exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
1657 &mvm->d0i3_suspend_flags);
1658 mutex_unlock(&mvm->d0i3_suspend_mutex);
1659 if (exit_now)
1660 _iwl_mvm_exit_d0i3(mvm);
1661 return 0;
1662 }
1663
1677 return __iwl_mvm_resume(mvm, false); 1664 return __iwl_mvm_resume(mvm, false);
1678} 1665}
1679 1666
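
The suspend/resume hunks above add a deferred-wakeup handshake for d0i3-capable devices: suspend sets D0I3_DEFER_WAKEUP, and resume clears it and performs the d0i3 exit if a wakeup arrived in the meantime (D0I3_PENDING_WAKEUP). A sketch of the wakeup-side counterpart those bits imply (the function name is hypothetical; the real logic lives in the mvm d0i3 exit path):

/* Hypothetical wakeup handler: if resume has not run yet, record the
 * wakeup as pending so iwl_mvm_resume() performs the d0i3 exit later;
 * otherwise exit d0i3 immediately. */
static void example_d0i3_wakeup(struct iwl_mvm *mvm)
{
	bool defer;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	defer = test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
	if (defer)
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	if (!defer)
		_iwl_mvm_exit_d0i3(mvm);
}
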
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 9b59e1d7ae71..2e90ff795c13 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
103 IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); 103 IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
104 dbgfs_pm->tx_data_timeout = val; 104 dbgfs_pm->tx_data_timeout = val;
105 break; 105 break;
106 case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
107 IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
108 dbgfs_pm->disable_power_off = val;
109 break;
110 case MVM_DEBUGFS_PM_LPRX_ENA: 106 case MVM_DEBUGFS_PM_LPRX_ENA:
111 IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled"); 107 IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
112 dbgfs_pm->lprx_ena = val; 108 dbgfs_pm->lprx_ena = val;
@@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
154 if (sscanf(buf + 16, "%d", &val) != 1) 150 if (sscanf(buf + 16, "%d", &val) != 1)
155 return -EINVAL; 151 return -EINVAL;
156 param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; 152 param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
157 } else if (!strncmp("disable_power_off=", buf, 18) &&
158 !(mvm->fw->ucode_capa.flags &
159 IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
160 if (sscanf(buf + 18, "%d", &val) != 1)
161 return -EINVAL;
162 param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
163 } else if (!strncmp("lprx=", buf, 5)) { 153 } else if (!strncmp("lprx=", buf, 5)) {
164 if (sscanf(buf + 5, "%d", &val) != 1) 154 if (sscanf(buf + 5, "%d", &val) != 1)
165 return -EINVAL; 155 return -EINVAL;
@@ -185,7 +175,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
185 175
186 mutex_lock(&mvm->mutex); 176 mutex_lock(&mvm->mutex);
187 iwl_dbgfs_update_pm(mvm, vif, param, val); 177 iwl_dbgfs_update_pm(mvm, vif, param, val);
188 ret = iwl_mvm_power_update_mac(mvm, vif); 178 ret = iwl_mvm_power_update_mac(mvm);
189 mutex_unlock(&mvm->mutex); 179 mutex_unlock(&mvm->mutex);
190 180
191 return ret ?: count; 181 return ret ?: count;
@@ -272,10 +262,9 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
272 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 262 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
273 263
274 pos += scnprintf(buf+pos, bufsz-pos, 264 pos += scnprintf(buf+pos, bufsz-pos,
275 "ap_sta_id %d - reduced Tx power %d force %d\n", 265 "ap_sta_id %d - reduced Tx power %d\n",
276 ap_sta_id, 266 ap_sta_id,
277 mvm_sta->bt_reduced_txpower, 267 mvm_sta->bt_reduced_txpower);
278 mvm_sta->bt_reduced_txpower_dbg);
279 } 268 }
280 } 269 }
281 270
@@ -293,41 +282,6 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
293 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 282 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
294} 283}
295 284
296static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
297 char *buf, size_t count,
298 loff_t *ppos)
299{
300 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
301 struct iwl_mvm *mvm = mvmvif->mvm;
302 struct iwl_mvm_sta *mvmsta;
303 bool reduced_tx_power;
304 int ret;
305
306 if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
307 return -ENOTCONN;
308
309 if (strtobool(buf, &reduced_tx_power) != 0)
310 return -EINVAL;
311
312 mutex_lock(&mvm->mutex);
313
314 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
315 if (IS_ERR_OR_NULL(mvmsta)) {
316 mutex_unlock(&mvm->mutex);
317 return -ENOTCONN;
318 }
319
320 mvmsta->bt_reduced_txpower_dbg = false;
321 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
322 reduced_tx_power);
323 if (!ret)
324 mvmsta->bt_reduced_txpower_dbg = true;
325
326 mutex_unlock(&mvm->mutex);
327
328 return ret ? : count;
329}
330
331static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, 285static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
332 enum iwl_dbgfs_bf_mask param, int value) 286 enum iwl_dbgfs_bf_mask param, int value)
333{ 287{
@@ -462,9 +416,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
462 mutex_lock(&mvm->mutex); 416 mutex_lock(&mvm->mutex);
463 iwl_dbgfs_update_bf(vif, param, value); 417 iwl_dbgfs_update_bf(vif, param, value);
464 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) 418 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
465 ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC); 419 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
466 else 420 else
467 ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC); 421 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
468 mutex_unlock(&mvm->mutex); 422 mutex_unlock(&mvm->mutex);
469 423
470 return ret ?: count; 424 return ret ?: count;
@@ -568,7 +522,6 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
568MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); 522MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
569MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); 523MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
570MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); 524MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
571MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
572 525
573void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 526void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
574{ 527{
@@ -592,8 +545,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
592 return; 545 return;
593 } 546 }
594 547
595 if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) && 548 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
596 iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
597 ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) || 549 ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
598 (vif->type == NL80211_IFTYPE_STATION && vif->p2p && 550 (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
599 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))) 551 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
@@ -601,7 +553,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
601 S_IRUSR); 553 S_IRUSR);
602 554
603 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); 555 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
604 MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
605 MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 556 MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
606 S_IRUSR | S_IWUSR); 557 S_IRUSR | S_IWUSR);
607 558
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 1b52deea6081..29ca72695eaa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -65,9 +65,8 @@
65#include "mvm.h" 65#include "mvm.h"
66#include "sta.h" 66#include "sta.h"
67#include "iwl-io.h" 67#include "iwl-io.h"
68#include "iwl-prph.h"
69#include "debugfs.h" 68#include "debugfs.h"
70#include "fw-error-dump.h" 69#include "iwl-fw-error-dump.h"
71 70
72static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, 71static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
73 size_t count, loff_t *ppos) 72 size_t count, loff_t *ppos)
@@ -136,9 +135,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
136 135
137 file->private_data = mvm->fw_error_dump; 136 file->private_data = mvm->fw_error_dump;
138 mvm->fw_error_dump = NULL; 137 mvm->fw_error_dump = NULL;
139 kfree(mvm->fw_error_sram);
140 mvm->fw_error_sram = NULL;
141 mvm->fw_error_sram_len = 0;
142 ret = 0; 138 ret = 0;
143 139
144out: 140out:
@@ -684,7 +680,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
684 mvm->restart_fw++; 680 mvm->restart_fw++;
685 681
686 /* take the return value to make compiler happy - it will fail anyway */ 682 /* take the return value to make compiler happy - it will fail anyway */
687 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL); 683 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
688 684
689 mutex_unlock(&mvm->mutex); 685 mutex_unlock(&mvm->mutex);
690 686
@@ -694,7 +690,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
694static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, 690static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
695 size_t count, loff_t *ppos) 691 size_t count, loff_t *ppos)
696{ 692{
697 iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1); 693 iwl_force_nmi(mvm->trans);
698 694
699 return count; 695 return count;
700} 696}
@@ -841,7 +837,7 @@ static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
841 /* send updated bcast filtering configuration */ 837 /* send updated bcast filtering configuration */
842 if (mvm->dbgfs_bcast_filtering.override && 838 if (mvm->dbgfs_bcast_filtering.override &&
843 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 839 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
844 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC, 840 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
845 sizeof(cmd), &cmd); 841 sizeof(cmd), &cmd);
846 mutex_unlock(&mvm->mutex); 842 mutex_unlock(&mvm->mutex);
847 843
@@ -913,7 +909,7 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
913 /* send updated bcast filtering configuration */ 909 /* send updated bcast filtering configuration */
914 if (mvm->dbgfs_bcast_filtering.override && 910 if (mvm->dbgfs_bcast_filtering.override &&
915 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 911 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
916 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC, 912 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
917 sizeof(cmd), &cmd); 913 sizeof(cmd), &cmd);
918 mutex_unlock(&mvm->mutex); 914 mutex_unlock(&mvm->mutex);
919 915
@@ -1004,6 +1000,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
1004 PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT); 1000 PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
1005 PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS); 1001 PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
1006 PRINT_MVM_REF(IWL_MVM_REF_USER); 1002 PRINT_MVM_REF(IWL_MVM_REF_USER);
1003 PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
1007 1004
1008 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1005 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1009} 1006}
@@ -1108,9 +1105,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
1108MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); 1105MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
1109 1106
1110static const struct file_operations iwl_dbgfs_fw_error_dump_ops = { 1107static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
1111 .open = iwl_dbgfs_fw_error_dump_open, 1108 .open = iwl_dbgfs_fw_error_dump_open,
1112 .read = iwl_dbgfs_fw_error_dump_read, 1109 .read = iwl_dbgfs_fw_error_dump_read,
1113 .release = iwl_dbgfs_fw_error_dump_release, 1110 .release = iwl_dbgfs_fw_error_dump_release,
1114}; 1111};
1115 1112
1116#ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1113#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@@ -1138,9 +1135,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
1138 MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR); 1135 MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
1139 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); 1136 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
1140 MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); 1137 MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
1141 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD) 1138 MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
1142 MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 1139 S_IRUSR | S_IWUSR);
1143 S_IRUSR | S_IWUSR);
1144 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR); 1140 MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
1145 MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR); 1141 MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
1146 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); 1142 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
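
The fw_nmi hook above now calls iwl_force_nmi() rather than open-coding the register write. A simplified sketch of what such a helper does, assuming the register layout the old code used (the real implementation lives in iwl-io.c and may distinguish device families):

/* Simplified sketch of the helper that replaces the open-coded write:
 * poke the NMI register so the firmware traps, for debugging. */
void iwl_force_nmi(struct iwl_trans *trans)
{
	iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}
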
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 21877e5966a8..5fe82c29c8ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
141 BT_COEX_TX_DIS_LUT, 141 BT_COEX_TX_DIS_LUT,
142 142
143 BT_COEX_MAX_LUT, 143 BT_COEX_MAX_LUT,
144}; 144 BT_COEX_INVALID_LUT = 0xff,
145}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
145 146
146#define BT_COEX_LUT_SIZE (12) 147#define BT_COEX_LUT_SIZE (12)
147#define BT_COEX_CORUN_LUT_SIZE (32) 148#define BT_COEX_CORUN_LUT_SIZE (32)
@@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
154 * @flags:&enum iwl_bt_coex_flags 155 * @flags:&enum iwl_bt_coex_flags
155 * @max_kill: 156 * @max_kill:
156 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power 157 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
157 * @bt4_antenna_isolation: 158 * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
158 * @bt4_antenna_isolation_thr: 159 * should be set by default
159 * @bt4_tx_tx_delta_freq_thr: 160 * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
160 * @bt4_tx_rx_max_freq0: 161 * should be set by default
161 * @bt_prio_boost: 162 * @bt4_antenna_isolation: antenna isolation
163 * @bt4_antenna_isolation_thr: antenna threshold value
164 * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
165 * @bt4_tx_rx_max_freq0: TxRx max frequency
166 * @bt_prio_boost: BT priority boost registers
162 * @wifi_tx_prio_boost: SW boost of wifi tx priority 167 * @wifi_tx_prio_boost: SW boost of wifi tx priority
163 * @wifi_rx_prio_boost: SW boost of wifi rx priority 168 * @wifi_rx_prio_boost: SW boost of wifi rx priority
164 * @kill_ack_msk: 169 * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
165 * @kill_cts_msk: 170 * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
166 * @decision_lut: 171 * @decision_lut: PTA decision LUT, per Prio-Ch
167 * @bt4_multiprio_lut: 172 * @bt4_multiprio_lut: multi priority LUT configuration
168 * @bt4_corun_lut20: 173 * @bt4_corun_lut20: co-running 20 MHz LUT configuration
169 * @bt4_corun_lut40: 174 * @bt4_corun_lut40: co-running 40 MHz LUT configuration
170 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk 175 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
171 * 176 *
172 * The structure is used for the BT_COEX command. 177 * The structure is used for the BT_COEX command.
@@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
175 __le32 flags; 180 __le32 flags;
176 u8 max_kill; 181 u8 max_kill;
177 u8 bt_reduced_tx_power; 182 u8 bt_reduced_tx_power;
178 u8 reserved[2]; 183 u8 override_primary_lut;
184 u8 override_secondary_lut;
179 185
180 u8 bt4_antenna_isolation; 186 u8 bt4_antenna_isolation;
181 u8 bt4_antenna_isolation_thr; 187 u8 bt4_antenna_isolation_thr;
@@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
194 __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE]; 200 __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
195 201
196 __le32 valid_bit_msk; 202 __le32 valid_bit_msk;
197} __packed; /* BT_COEX_CMD_API_S_VER_3 */ 203} __packed; /* BT_COEX_CMD_API_S_VER_5 */
198 204
199/** 205/**
200 * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command 206 * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
282 BT_ON_NO_CONNECTION = 1, 288 BT_ON_NO_CONNECTION = 1,
283 BT_LOW_TRAFFIC = 2, 289 BT_LOW_TRAFFIC = 2,
284 BT_HIGH_TRAFFIC = 3, 290 BT_HIGH_TRAFFIC = 3,
285}; 291}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
286 292
287/** 293/**
288 * struct iwl_bt_coex_profile_notif - notification about BT coex 294 * struct iwl_bt_coex_profile_notif - notification about BT coex
@@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
310 __le32 primary_ch_lut; 316 __le32 primary_ch_lut;
311 __le32 secondary_ch_lut; 317 __le32 secondary_ch_lut;
312 __le32 bt_activity_grading; 318 __le32 bt_activity_grading;
313} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */ 319} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
314 320
315enum iwl_bt_coex_prio_table_event { 321enum iwl_bt_coex_prio_table_event {
316 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0, 322 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 10fcc1a79ebd..13696fe419b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
345 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), 345 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
346}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ 346}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
347 347
348struct iwl_wowlan_status_v4 {
349 __le64 replay_ctr;
350 __le16 pattern_number;
351 __le16 non_qos_seq_ctr;
352 __le16 qos_seq_ctr[8];
353 __le32 wakeup_reasons;
354 __le32 rekey_status;
355 __le32 num_of_gtk_rekeys;
356 __le32 transmitted_ndps;
357 __le32 received_beacons;
358 __le32 wake_packet_length;
359 __le32 wake_packet_bufsize;
360 u8 wake_packet[]; /* can be truncated from _length to _bufsize */
361} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
362
363struct iwl_wowlan_gtk_status { 348struct iwl_wowlan_gtk_status {
364 u8 key_index; 349 u8 key_index;
365 u8 reserved[3]; 350 u8 reserved[3];
@@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
368 struct iwl_wowlan_rsc_tsc_params_cmd rsc; 353 struct iwl_wowlan_rsc_tsc_params_cmd rsc;
369} __packed; 354} __packed;
370 355
371struct iwl_wowlan_status_v6 { 356struct iwl_wowlan_status {
372 struct iwl_wowlan_gtk_status gtk; 357 struct iwl_wowlan_gtk_status gtk;
373 __le64 replay_ctr; 358 __le64 replay_ctr;
374 __le16 pattern_number; 359 __le16 pattern_number;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 39148b5bb332..8bb5b94bf963 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -334,7 +334,7 @@ enum {
334 */ 334 */
335struct iwl_lq_cmd { 335struct iwl_lq_cmd {
336 u8 sta_id; 336 u8 sta_id;
337 u8 reserved1; 337 u8 reduced_tpc;
338 u16 control; 338 u16 control;
339 /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ 339 /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
340 u8 flags; 340 u8 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d73a89ecd78a..6959fda3fe09 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -169,8 +169,12 @@ enum iwl_scan_type {
169 SCAN_TYPE_DISCOVERY_FORCED = 6, 169 SCAN_TYPE_DISCOVERY_FORCED = 6,
170}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */ 170}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
171 171
172/* Maximal number of channels to scan */ 172/**
173#define MAX_NUM_SCAN_CHANNELS 0x24 173 * Maximal number of channels to scan
174 * it should be equal to:
175 * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
176 */
177#define MAX_NUM_SCAN_CHANNELS 50
174 178
175/** 179/**
176 * struct iwl_scan_cmd - scan request command 180 * struct iwl_scan_cmd - scan request command
@@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
534 * 538 *
535 * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering. 539 * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
536 * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan. 540 * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
537 * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan 541 * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
538 * on A band. 542 * beacon period. Finding channel activity in this mode is not guaranteed.
543 * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
544 * Assuming beacon period is 100ms finding channel activity is guaranteed.
539 */ 545 */
540enum iwl_scan_offload_flags { 546enum iwl_scan_offload_flags {
541 IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0), 547 IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
542 IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2), 548 IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
543 IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3), 549 IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
550 IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
544}; 551};
545 552
546/** 553/**
@@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
563 IWL_SCAN_OFFLOAD_ABORTED = 2, 570 IWL_SCAN_OFFLOAD_ABORTED = 2,
564}; 571};
565 572
573enum iwl_scan_ebs_status {
574 IWL_SCAN_EBS_SUCCESS,
575 IWL_SCAN_EBS_FAILED,
576 IWL_SCAN_EBS_CHAN_NOT_FOUND,
577};
578
566/** 579/**
567 * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1 580 * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
568 * @last_schedule_line: last schedule line executed (fast or regular) 581 * @last_schedule_line: last schedule line executed (fast or regular)
569 * @last_schedule_iteration: last scan iteration executed before scan abort 582 * @last_schedule_iteration: last scan iteration executed before scan abort
570 * @status: enum iwl_scan_offload_compleate_status 583 * @status: enum iwl_scan_offload_compleate_status
584 * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
571 */ 585 */
572struct iwl_scan_offload_complete { 586struct iwl_scan_offload_complete {
573 u8 last_schedule_line; 587 u8 last_schedule_line;
574 u8 last_schedule_iteration; 588 u8 last_schedule_iteration;
575 u8 status; 589 u8 status;
576 u8 reserved; 590 u8 ebs_status;
577} __packed; 591} __packed;
578 592
579/** 593/**
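
The scan-offload completion notification now carries an EBS status byte where the old reserved field sat. An illustrative fragment of how a completion handler might react to it (the function name is hypothetical; the real handler in scan.c decides whether to keep using EBS on later scans):

/* Illustrative fragment: treat anything but success as a reason to
 * avoid EBS, since IWL_SCAN_EBS_SUCCESS is the only good status. */
static void example_check_ebs_status(struct iwl_mvm *mvm,
				     struct iwl_scan_offload_complete *notif)
{
	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS)
		IWL_DEBUG_SCAN(mvm, "EBS status %u, avoiding EBS next scan\n",
			       notif->ebs_status);
}
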
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index d63647867262..39cebee8016f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
255} __packed; 255} __packed;
256 256
257/** 257/**
258 * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table. 258 * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
259 * ( REPLY_ADD_STA = 0x18 ) 259 * ( REPLY_ADD_STA = 0x18 )
260 * @add_modify: 1: modify existing, 0: add new station 260 * @add_modify: 1: modify existing, 0: add new station
261 * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent 261 * @awake_acs:
262 * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key 262 * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
263 * sent 263 * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
264 * @mac_id_n_color: the Mac context this station belongs to 264 * @mac_id_n_color: the Mac context this station belongs to
265 * @addr[ETH_ALEN]: station's MAC address 265 * @addr[ETH_ALEN]: station's MAC address
266 * @sta_id: index of station in uCode's station table 266 * @sta_id: index of station in uCode's station table
267 * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave 267 * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
268 * alone. 1 - modify, 0 - don't change. 268 * alone. 1 - modify, 0 - don't change.
269 * @key: look at %iwl_mvm_keyinfo
270 * @station_flags: look at %iwl_sta_flags 269 * @station_flags: look at %iwl_sta_flags
271 * @station_flags_msk: what of %station_flags have changed 270 * @station_flags_msk: what of %station_flags have changed
272 * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
273 * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
274 * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) 271 * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
275 * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set 272 * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
276 * add_immediate_ba_ssn. 273 * add_immediate_ba_ssn.
@@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
294 * ADD_STA sets up the table entry for one station, either creating a new 291 * ADD_STA sets up the table entry for one station, either creating a new
295 * entry, or modifying a pre-existing one. 292 * entry, or modifying a pre-existing one.
296 */ 293 */
297struct iwl_mvm_add_sta_cmd_v5 { 294struct iwl_mvm_add_sta_cmd {
298 u8 add_modify;
299 u8 unicast_tx_key_id;
300 u8 multicast_tx_key_id;
301 u8 reserved1;
302 __le32 mac_id_n_color;
303 u8 addr[ETH_ALEN];
304 __le16 reserved2;
305 u8 sta_id;
306 u8 modify_mask;
307 __le16 reserved3;
308 struct iwl_mvm_keyinfo key;
309 __le32 station_flags;
310 __le32 station_flags_msk;
311 __le16 tid_disable_tx;
312 __le16 reserved4;
313 u8 add_immediate_ba_tid;
314 u8 remove_immediate_ba_tid;
315 __le16 add_immediate_ba_ssn;
316 __le16 sleep_tx_count;
317 __le16 sleep_state_flags;
318 __le16 assoc_id;
319 __le16 beamform_flags;
320 __le32 tfd_queue_msk;
321} __packed; /* ADD_STA_CMD_API_S_VER_5 */
322
323/**
324 * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
325 * VER_7 of this command is quite similar to VER_5 except
326 * exclusion of all fields related to the security key installation.
327 * It only differs from VER_6 by the "awake_acs" field that is
328 * reserved and ignored in VER_6.
329 */
330struct iwl_mvm_add_sta_cmd_v7 {
331 u8 add_modify; 295 u8 add_modify;
332 u8 awake_acs; 296 u8 awake_acs;
333 __le16 tid_disable_tx; 297 __le16 tid_disable_tx;
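With the v5 layout gone, tid_disable_tx keeps the inverted semantics spelled out in the kerneldoc above: a cleared bit enables AMPDU for that TID, and the firmware only honours the field when STA_MODIFY_TID_DISABLE_TX is set in modify_mask. A sketch of that convention (flag value assumed for illustration; the real define lives elsewhere in fw-api-sta.h):

#include <stdint.h>

#define STA_MODIFY_TID_DISABLE_TX (1 << 1)	/* assumed value, illustration only */

/* Enable aggregation on one TID: clear its bit in tid_disable_tx and
 * tell the firmware to actually look at the field. */
static void sta_cmd_enable_ampdu(uint16_t *tid_disable_tx,
				 uint8_t *modify_mask, unsigned int tid)
{
	*tid_disable_tx &= (uint16_t)~(1u << tid);
	*modify_mask |= STA_MODIFY_TID_DISABLE_TX;
}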
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 8e122f3a7a74..6cc5f52b807f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
482 u8 pa_integ_res_b[3]; 482 u8 pa_integ_res_b[3];
483 u8 pa_integ_res_c[3]; 483 u8 pa_integ_res_c[3];
484 __le16 measurement_req_id; 484 __le16 measurement_req_id;
485 __le16 reserved; 485 u8 reduced_tpc;
486 u8 reserved;
486 487
487 __le32 tfd_info; 488 __le32 tfd_info;
488 __le16 seq_ctl; 489 __le16 seq_ctl;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 6e75b52588de..309a9b9a94fe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -71,6 +71,7 @@
71#include "fw-api-power.h" 71#include "fw-api-power.h"
72#include "fw-api-d3.h" 72#include "fw-api-d3.h"
73#include "fw-api-coex.h" 73#include "fw-api-coex.h"
74#include "fw-api-scan.h"
74 75
75/* maximal number of Tx queues in any platform */ 76/* maximal number of Tx queues in any platform */
76#define IWL_MVM_MAX_QUEUES 20 77#define IWL_MVM_MAX_QUEUES 20
@@ -604,52 +605,7 @@ enum {
604 TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), 605 TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
605}; /* MAC_EVENT_ACTION_API_E_VER_2 */ 606}; /* MAC_EVENT_ACTION_API_E_VER_2 */
606 607
607 608/* Time event - defines for command API */
608/**
609 * struct iwl_time_event_cmd_api_v1 - configuring Time Events
610 * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
611 * with version 2. determined by IWL_UCODE_TLV_FLAGS)
612 * ( TIME_EVENT_CMD = 0x29 )
613 * @id_and_color: ID and color of the relevant MAC
614 * @action: action to perform, one of FW_CTXT_ACTION_*
615 * @id: this field has two meanings, depending on the action:
616 * If the action is ADD, then it means the type of event to add.
617 * For all other actions it is the unique event ID assigned when the
618 * event was added by the FW.
619 * @apply_time: When to start the Time Event (in GP2)
620 * @max_delay: maximum delay to event's start (apply time), in TU
621 * @depends_on: the unique ID of the event we depend on (if any)
622 * @interval: interval between repetitions, in TU
623 * @interval_reciprocal: 2^32 / interval
624 * @duration: duration of event in TU
625 * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
626 * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
627 * and TE_V1_EVENT_SOCIOPATHIC
628 * @is_present: 0 or 1, are we present or absent during the Time Event
629 * @max_frags: maximal number of fragments the Time Event can be divided to
630 * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
631 */
632struct iwl_time_event_cmd_v1 {
633 /* COMMON_INDEX_HDR_API_S_VER_1 */
634 __le32 id_and_color;
635 __le32 action;
636 __le32 id;
637 /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
638 __le32 apply_time;
639 __le32 max_delay;
640 __le32 dep_policy;
641 __le32 depends_on;
642 __le32 is_present;
643 __le32 max_frags;
644 __le32 interval;
645 __le32 interval_reciprocal;
646 __le32 duration;
647 __le32 repeat;
648 __le32 notify;
649} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
650
651
652/* Time event - defines for command API v2 */
653 609
654/* 610/*
655 * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. 611 * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -680,7 +636,7 @@ enum {
680#define TE_V2_PLACEMENT_POS 12 636#define TE_V2_PLACEMENT_POS 12
681#define TE_V2_ABSENCE_POS 15 637#define TE_V2_ABSENCE_POS 15
682 638
683/* Time event policy values (for time event cmd api v2) 639/* Time event policy values
684 * A notification (both event and fragment) includes a status indicating whether 640
685 * the FW was able to schedule the event or not. For fragment start/end 641 * the FW was able to schedule the event or not. For fragment start/end
686 * notification the status is always success. There is no start/end fragment 642 * notification the status is always success. There is no start/end fragment
@@ -727,7 +683,7 @@ enum {
727}; 683};
728 684
729/** 685/**
730 * struct iwl_time_event_cmd_api_v2 - configuring Time Events 686 * struct iwl_time_event_cmd - configuring Time Events
731 * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also 687 * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
732 * with version 1. determined by IWL_UCODE_TLV_FLAGS) 688 * with version 1. determined by IWL_UCODE_TLV_FLAGS)
733 * ( TIME_EVENT_CMD = 0x29 ) 689 * ( TIME_EVENT_CMD = 0x29 )
@@ -750,7 +706,7 @@ enum {
750 * TE_EVENT_SOCIOPATHIC 706 * TE_EVENT_SOCIOPATHIC
751 * using TE_ABSENCE and using TE_NOTIF_* 707 * using TE_ABSENCE and using TE_NOTIF_*
752 */ 708 */
753struct iwl_time_event_cmd_v2 { 709struct iwl_time_event_cmd {
754 /* COMMON_INDEX_HDR_API_S_VER_1 */ 710 /* COMMON_INDEX_HDR_API_S_VER_1 */
755 __le32 id_and_color; 711 __le32 id_and_color;
756 __le32 action; 712 __le32 action;
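Only the v2 policy encoding survives this cleanup; dependency, notification, placement and absence attributes all share one policy word, with the upper attribute bits located by the _POS defines shown earlier in the hunk. A sketch of the packing, assuming the dependency and notification flags occupy the low bits as those defines suggest:

#include <stdint.h>

#define TE_V2_PLACEMENT_POS	12
#define TE_V2_ABSENCE_POS	15

/* Fold the v2 time-event attributes into one policy word. The low-bit
 * layout of dep_and_notif is assumed from the *_POS defines above. */
static uint32_t te_v2_policy(uint32_t dep_and_notif,
			     unsigned int placement, int absent)
{
	uint32_t policy = dep_and_notif;

	policy |= (uint32_t)placement << TE_V2_PLACEMENT_POS;
	if (absent)
		policy |= 1u << TE_V2_ABSENCE_POS;
	return policy;
}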
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 7ce20062f32d..883e702152d5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -99,7 +99,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
99 }; 99 };
100 100
101 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); 101 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
102 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC, 102 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
103 sizeof(tx_ant_cmd), &tx_ant_cmd); 103 sizeof(tx_ant_cmd), &tx_ant_cmd);
104} 104}
105 105
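This is the first of many hunks in the series that drop CMD_SYNC: synchronous delivery becomes the default, expressed as a zero flags word, and CMD_ASYNC stays as the explicit opt-out. A sketch of the resulting calling convention (prototype approximated from the call sites in this diff):

#include <stdint.h>

struct iwl_mvm;					/* opaque here */
int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, uint8_t id, uint32_t flags,
			 uint16_t len, const void *data);

/* After this series a zero flags argument means "send synchronously";
 * no CMD_SYNC bit exists any more. */
static inline int send_sync(struct iwl_mvm *mvm, uint8_t id,
			    uint16_t len, const void *data)
{
	return iwl_mvm_send_cmd_pdu(mvm, id, 0 /* sync */, len, data);
}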
@@ -137,6 +137,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
137 alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr); 137 alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
138 mvm->umac_error_event_table = 138 mvm->umac_error_event_table =
139 le32_to_cpu(palive2->error_info_addr); 139 le32_to_cpu(palive2->error_info_addr);
140 mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
141 mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
140 142
141 alive_data->valid = le16_to_cpu(palive2->status) == 143 alive_data->valid = le16_to_cpu(palive2->status) ==
142 IWL_ALIVE_STATUS_OK; 144 IWL_ALIVE_STATUS_OK;
@@ -180,6 +182,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
180 int ret, i; 182 int ret, i;
181 enum iwl_ucode_type old_type = mvm->cur_ucode; 183 enum iwl_ucode_type old_type = mvm->cur_ucode;
182 static const u8 alive_cmd[] = { MVM_ALIVE }; 184 static const u8 alive_cmd[] = { MVM_ALIVE };
185 struct iwl_sf_region st_fwrd_space;
183 186
184 fw = iwl_get_ucode_image(mvm, ucode_type); 187 fw = iwl_get_ucode_image(mvm, ucode_type);
185 if (WARN_ON(!fw)) 188 if (WARN_ON(!fw))
@@ -215,6 +218,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
215 return -EIO; 218 return -EIO;
216 } 219 }
217 220
221 /*
222 * update the sdio allocation according to the pointer we get in the
223 * alive notification.
224 */
225 st_fwrd_space.addr = mvm->sf_space.addr;
226 st_fwrd_space.size = mvm->sf_space.size;
227 ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
228
218 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); 229 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
219 230
220 /* 231 /*
@@ -256,7 +267,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
256 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", 267 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
257 phy_cfg_cmd.phy_cfg); 268 phy_cfg_cmd.phy_cfg);
258 269
259 return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC, 270 return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
260 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 271 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
261} 272}
262 273
@@ -288,14 +299,14 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
288 goto error; 299 goto error;
289 } 300 }
290 301
291 ret = iwl_send_bt_prio_tbl(mvm); 302 ret = iwl_send_bt_init_conf(mvm);
292 if (ret) 303 if (ret)
293 goto error; 304 goto error;
294 305
295 /* Read the NVM only at driver load time, no need to do this twice */ 306 /* Read the NVM only at driver load time, no need to do this twice */
296 if (read_nvm) { 307 if (read_nvm) {
297 /* Read nvm */ 308 /* Read nvm */
298 ret = iwl_nvm_init(mvm); 309 ret = iwl_nvm_init(mvm, true);
299 if (ret) { 310 if (ret) {
300 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); 311 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
301 goto error; 312 goto error;
@@ -303,7 +314,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
303 } 314 }
304 315
305 /* In case we read the NVM from external file, load it to the NIC */ 316 /* In case we read the NVM from external file, load it to the NIC */
306 if (iwlwifi_mod_params.nvm_file) 317 if (mvm->nvm_file_name)
307 iwl_mvm_load_nvm_to_nic(mvm); 318 iwl_mvm_load_nvm_to_nic(mvm);
308 319
309 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); 320 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
@@ -424,10 +435,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
424 if (ret) 435 if (ret)
425 goto error; 436 goto error;
426 437
427 ret = iwl_send_bt_prio_tbl(mvm);
428 if (ret)
429 goto error;
430
431 ret = iwl_send_bt_init_conf(mvm); 438 ret = iwl_send_bt_init_conf(mvm);
432 if (ret) 439 if (ret)
433 goto error; 440 goto error;
@@ -468,12 +475,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
468 /* Initialize tx backoffs to the minimal possible */ 475 /* Initialize tx backoffs to the minimal possible */
469 iwl_mvm_tt_tx_backoff(mvm, 0); 476 iwl_mvm_tt_tx_backoff(mvm, 0);
470 477
471 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
472 ret = iwl_power_legacy_set_cam_mode(mvm);
473 if (ret)
474 goto error;
475 }
476
477 ret = iwl_mvm_power_update_device(mvm); 478 ret = iwl_mvm_power_update_device(mvm);
478 if (ret) 479 if (ret)
479 goto error; 480 goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 9ccec10bba16..8b5302777632 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
667 if (vif->bss_conf.qos) 667 if (vif->bss_conf.qos)
668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
669 669
670 /* Don't use cts to self as the fw doesn't support it currently. */
671 if (vif->bss_conf.use_cts_prot) { 670 if (vif->bss_conf.use_cts_prot) {
672 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); 671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
673 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8) 672 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
674 cmd->protection_flags |=
675 cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
676 } 673 }
677 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", 674 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
678 vif->bss_conf.use_cts_prot, 675 vif->bss_conf.use_cts_prot,
@@ -688,7 +685,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
688static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, 685static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
689 struct iwl_mac_ctx_cmd *cmd) 686 struct iwl_mac_ctx_cmd *cmd)
690{ 687{
691 int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC, 688 int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
692 sizeof(*cmd), cmd); 689 sizeof(*cmd), cmd);
693 if (ret) 690 if (ret)
694 IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", 691 IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
@@ -696,19 +693,39 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
696 return ret; 693 return ret;
697} 694}
698 695
699/* 696static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
700 * Fill the specific data for mac context of type station or p2p client 697 struct ieee80211_vif *vif,
701 */ 698 u32 action, bool force_assoc_off)
702static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
703 struct ieee80211_vif *vif,
704 struct iwl_mac_data_sta *ctxt_sta,
705 bool force_assoc_off)
706{ 699{
700 struct iwl_mac_ctx_cmd cmd = {};
701 struct iwl_mac_data_sta *ctxt_sta;
702
703 WARN_ON(vif->type != NL80211_IFTYPE_STATION);
704
705 /* Fill the common data for all mac context types */
706 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
707
708 if (vif->p2p) {
709 struct ieee80211_p2p_noa_attr *noa =
710 &vif->bss_conf.p2p_noa_attr;
711
712 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
713 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
714 ctxt_sta = &cmd.p2p_sta.sta;
715 } else {
716 ctxt_sta = &cmd.sta;
717 }
718
707 /* We need the dtim_period to set the MAC as associated */ 719 /* We need the dtim_period to set the MAC as associated */
708 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && 720 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
709 !force_assoc_off) { 721 !force_assoc_off) {
710 u32 dtim_offs; 722 u32 dtim_offs;
711 723
724 /* Allow beacons to pass through as long as we are not
725 * associated, or we do not have dtim period information.
726 */
727 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
728
712 /* 729 /*
713 * The DTIM count counts down, so when it is N that means N 730 * The DTIM count counts down, so when it is N that means N
714 * more beacon intervals happen until the DTIM TBTT. Therefore 731 * more beacon intervals happen until the DTIM TBTT. Therefore
@@ -755,51 +772,6 @@ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
755 772
756 ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); 773 ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
757 ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); 774 ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
758}
759
760static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
761 struct ieee80211_vif *vif,
762 u32 action)
763{
764 struct iwl_mac_ctx_cmd cmd = {};
765
766 WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
767
768 /* Fill the common data for all mac context types */
769 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
770
771 /* Allow beacons to pass through as long as we are not associated, or we
772 * do not have dtim period information */
773 if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
774 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
775 else
776 cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
777
778 /* Fill the data specific for station mode */
779 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
780 action == FW_CTXT_ACTION_ADD);
781
782 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
783}
784
785static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
786 struct ieee80211_vif *vif,
787 u32 action)
788{
789 struct iwl_mac_ctx_cmd cmd = {};
790 struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
791
792 WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
793
794 /* Fill the common data for all mac context types */
795 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
796
797 /* Fill the data specific for station mode */
798 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
799 action == FW_CTXT_ACTION_ADD);
800
801 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
802 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
803 775
804 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 776 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
805} 777}
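The comment in this hunk describes the DTIM arithmetic in prose; the code that computes the offset is elided by the hunk boundary. A standalone sketch of the calculation it describes, using the standard 1 TU = 1024 usec conversion (helper name hypothetical):

#include <stdint.h>

/* The DTIM count counts down: a sync_dtim_count of N means N more
 * beacon intervals pass before the DTIM TBTT. beacon_int is in TU. */
static uint64_t dtim_offset_usec(uint8_t sync_dtim_count,
				 uint16_t beacon_int_tu)
{
	return (uint64_t)sync_dtim_count * beacon_int_tu * 1024;
}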
@@ -1137,16 +1109,12 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
1137} 1109}
1138 1110
1139static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1111static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1140 u32 action) 1112 u32 action, bool force_assoc_off)
1141{ 1113{
1142 switch (vif->type) { 1114 switch (vif->type) {
1143 case NL80211_IFTYPE_STATION: 1115 case NL80211_IFTYPE_STATION:
1144 if (!vif->p2p) 1116 return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
1145 return iwl_mvm_mac_ctxt_cmd_station(mvm, vif, 1117 force_assoc_off);
1146 action);
1147 else
1148 return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
1149 action);
1150 break; 1118 break;
1151 case NL80211_IFTYPE_AP: 1119 case NL80211_IFTYPE_AP:
1152 if (!vif->p2p) 1120 if (!vif->p2p)
@@ -1176,7 +1144,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1176 vif->addr, ieee80211_vif_type_p2p(vif))) 1144 vif->addr, ieee80211_vif_type_p2p(vif)))
1177 return -EIO; 1145 return -EIO;
1178 1146
1179 ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD); 1147 ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
1148 true);
1180 if (ret) 1149 if (ret)
1181 return ret; 1150 return ret;
1182 1151
@@ -1187,7 +1156,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1187 return 0; 1156 return 0;
1188} 1157}
1189 1158
1190int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1159int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1160 bool force_assoc_off)
1191{ 1161{
1192 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1162 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1193 1163
@@ -1195,7 +1165,8 @@ int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1195 vif->addr, ieee80211_vif_type_p2p(vif))) 1165 vif->addr, ieee80211_vif_type_p2p(vif)))
1196 return -EIO; 1166 return -EIO;
1197 1167
1198 return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY); 1168 return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
1169 force_assoc_off);
1199} 1170}
1200 1171
1201int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1172int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1214,7 +1185,7 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1214 mvmvif->color)); 1185 mvmvif->color));
1215 cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); 1186 cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
1216 1187
1217 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC, 1188 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
1218 sizeof(cmd), &cmd); 1189 sizeof(cmd), &cmd);
1219 if (ret) { 1190 if (ret) {
1220 IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); 1191 IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
@@ -1240,11 +1211,23 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1240 u32 rate __maybe_unused = 1211 u32 rate __maybe_unused =
1241 le32_to_cpu(beacon->beacon_notify_hdr.initial_rate); 1212 le32_to_cpu(beacon->beacon_notify_hdr.initial_rate);
1242 1213
1214 lockdep_assert_held(&mvm->mutex);
1215
1243 IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n", 1216 IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n",
1244 status & TX_STATUS_MSK, 1217 status & TX_STATUS_MSK,
1245 beacon->beacon_notify_hdr.failure_frame, 1218 beacon->beacon_notify_hdr.failure_frame,
1246 le64_to_cpu(beacon->tsf), 1219 le64_to_cpu(beacon->tsf),
1247 rate); 1220 rate);
1221
1222 if (unlikely(mvm->csa_vif && mvm->csa_vif->csa_active)) {
1223 if (!ieee80211_csa_is_complete(mvm->csa_vif)) {
1224 iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm->csa_vif);
1225 } else {
1226 ieee80211_csa_finish(mvm->csa_vif);
1227 mvm->csa_vif = NULL;
1228 }
1229 }
1230
1248 return 0; 1231 return 0;
1249} 1232}
1250 1233
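The new branch in iwl_mvm_rx_beacon_notif() drives the channel-switch countdown from beacon-tx notifications: while mac80211 reports the CSA as incomplete, the driver pushes a refreshed beacon template (which decrements the countdown IE), and once it is complete, it finalizes the switch. A toy model of that state machine, with the mac80211 calls reduced to plain operations:

#include <stdbool.h>

struct csa_model {
	bool active;
	int beacons_left;	/* stands in for the countdown IE */
};

/* Returns true once the switch is finalized. Refreshing the template
 * models iwl_mvm_mac_ctxt_beacon_changed(); finalizing models
 * ieee80211_csa_finish() plus clearing mvm->csa_vif. */
static bool csa_on_beacon_tx(struct csa_model *c)
{
	if (!c->active)
		return false;
	if (c->beacons_left > 0) {
		c->beacons_left--;	/* push refreshed template */
		return false;
	}
	c->active = false;		/* finish the switch */
	return true;
}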
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 8735ef1f44ae..7215f5980186 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -295,7 +295,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
295 !iwlwifi_mod_params.sw_crypto) 295 !iwlwifi_mod_params.sw_crypto)
296 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 296 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
297 297
298 if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { 298 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
299 IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
300 !iwlwifi_mod_params.uapsd_disable) {
299 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; 301 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
300 hw->uapsd_queues = IWL_UAPSD_AC_INFO; 302 hw->uapsd_queues = IWL_UAPSD_AC_INFO;
301 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 303 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -309,11 +311,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
309 BIT(NL80211_IFTYPE_P2P_CLIENT) | 311 BIT(NL80211_IFTYPE_P2P_CLIENT) |
310 BIT(NL80211_IFTYPE_AP) | 312 BIT(NL80211_IFTYPE_AP) |
311 BIT(NL80211_IFTYPE_P2P_GO) | 313 BIT(NL80211_IFTYPE_P2P_GO) |
312 BIT(NL80211_IFTYPE_P2P_DEVICE); 314 BIT(NL80211_IFTYPE_P2P_DEVICE) |
313 315 BIT(NL80211_IFTYPE_ADHOC);
314 /* IBSS has bugs in older versions */
315 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
316 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
317 316
318 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 317 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
319 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 318 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@ -322,6 +321,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
322 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD) 321 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
323 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 322 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
324 323
324 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
325 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
326
325 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 327 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
326 hw->wiphy->n_iface_combinations = 328 hw->wiphy->n_iface_combinations =
327 ARRAY_SIZE(iwl_mvm_iface_combinations); 329 ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -365,14 +367,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
365 else 367 else
366 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 368 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
367 369
368 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) { 370 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
369 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 371 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
370 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 372 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
371 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; 373 /* we create the 802.11 header and zero length SSID IE. */
372 /* we create the 802.11 header and zero length SSID IE. */ 374 hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
373 hw->wiphy->max_sched_scan_ie_len =
374 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
375 }
376 375
377 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 376 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
378 NL80211_FEATURE_P2P_GO_OPPPS; 377 NL80211_FEATURE_P2P_GO_OPPPS;
@@ -386,7 +385,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
386 } 385 }
387 386
388#ifdef CONFIG_PM_SLEEP 387#ifdef CONFIG_PM_SLEEP
389 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 388 if (iwl_mvm_is_d0i3_supported(mvm) &&
389 device_can_wakeup(mvm->trans->dev)) {
390 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
391 hw->wiphy->wowlan = &mvm->wowlan;
392 } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
390 mvm->trans->ops->d3_suspend && 393 mvm->trans->ops->d3_suspend &&
391 mvm->trans->ops->d3_resume && 394 mvm->trans->ops->d3_resume &&
392 device_can_wakeup(mvm->trans->dev)) { 395 device_can_wakeup(mvm->trans->dev)) {
@@ -540,13 +543,22 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
540 return -EACCES; 543 return -EACCES;
541 544
542 /* return from D0i3 before starting a new Tx aggregation */ 545 /* return from D0i3 before starting a new Tx aggregation */
543 if (action == IEEE80211_AMPDU_TX_START) { 546 switch (action) {
547 case IEEE80211_AMPDU_TX_START:
548 case IEEE80211_AMPDU_TX_STOP_CONT:
549 case IEEE80211_AMPDU_TX_STOP_FLUSH:
550 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
551 case IEEE80211_AMPDU_TX_OPERATIONAL:
544 iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG); 552 iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
545 tx_agg_ref = true; 553 tx_agg_ref = true;
546 554
547 /* 555 /*
548 * wait synchronously until D0i3 exit to get the correct 556 * for tx start, wait synchronously until D0i3 exit to
549 * sequence number for the tid 557 * get the correct sequence number for the tid.
558 * additionally, some other ampdu actions use direct
559 * target access, which is not handled automatically
560 * by the trans layer (unlike commands), so wait for
561 * d0i3 exit in these cases as well.
550 */ 562 */
551 if (!wait_event_timeout(mvm->d0i3_exit_waitq, 563 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
552 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) { 564 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
@@ -554,6 +566,9 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
554 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); 566 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
555 return -EIO; 567 return -EIO;
556 } 568 }
569 break;
570 default:
571 break;
557 } 572 }
558 573
559 mutex_lock(&mvm->mutex); 574 mutex_lock(&mvm->mutex);
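The widened switch above applies one pattern to every aggregation action that touches the device directly: take a TX_AGG reference (which kicks off D0i3 exit), then block until the exit flag clears or a one-second timeout fires, dropping the reference on failure. A condensed sketch of that pattern outside the kernel (types and names illustrative only):

#include <errno.h>
#include <stdbool.h>

struct d0i3_gate {
	int refs;
	bool in_d0i3;
};

/* wait_exit() stands in for wait_event_timeout(..., HZ): it returns
 * false when the device is still in D0i3 after the timeout. */
static int agg_ref_and_wait(struct d0i3_gate *g,
			    bool (*wait_exit)(struct d0i3_gate *))
{
	g->refs++;			/* ~ iwl_mvm_ref(IWL_MVM_REF_TX_AGG) */
	if (!wait_exit(g)) {
		g->refs--;		/* ~ iwl_mvm_unref(...) */
		return -EIO;
	}
	return 0;
}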
@@ -758,7 +773,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
758 .pwr_restriction = cpu_to_le16(tx_power), 773 .pwr_restriction = cpu_to_le16(tx_power),
759 }; 774 };
760 775
761 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC, 776 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
762 sizeof(reduce_txpwr_cmd), 777 sizeof(reduce_txpwr_cmd),
763 &reduce_txpwr_cmd); 778 &reduce_txpwr_cmd);
764} 779}
@@ -817,18 +832,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
817 if (ret) 832 if (ret)
818 goto out_release; 833 goto out_release;
819 834
820 ret = iwl_mvm_power_update_mac(mvm, vif); 835 ret = iwl_mvm_power_update_mac(mvm);
821 if (ret) 836 if (ret)
822 goto out_release; 837 goto out_release;
823 838
824 /* beacon filtering */ 839 /* beacon filtering */
825 ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC); 840 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
826 if (ret) 841 if (ret)
827 goto out_remove_mac; 842 goto out_remove_mac;
828 843
829 if (!mvm->bf_allowed_vif && false && 844 if (!mvm->bf_allowed_vif &&
830 vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 845 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
831 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
832 mvm->bf_allowed_vif = mvmvif; 846 mvm->bf_allowed_vif = mvmvif;
833 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 847 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
834 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 848 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -969,7 +983,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
969 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) 983 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
970 mvm->vif_count--; 984 mvm->vif_count--;
971 985
972 iwl_mvm_power_update_mac(mvm, vif); 986 iwl_mvm_power_update_mac(mvm);
973 iwl_mvm_mac_ctxt_remove(mvm, vif); 987 iwl_mvm_mac_ctxt_remove(mvm, vif);
974 988
975out_release: 989out_release:
@@ -1223,10 +1237,14 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
1223 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 1237 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1224 return 0; 1238 return 0;
1225 1239
1240 /* bcast filtering isn't supported for P2P client */
1241 if (vif->p2p)
1242 return 0;
1243
1226 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 1244 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1227 return 0; 1245 return 0;
1228 1246
1229 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC, 1247 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1230 sizeof(cmd), &cmd); 1248 sizeof(cmd), &cmd);
1231} 1249}
1232#else 1250#else
@@ -1253,7 +1271,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1253 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) 1271 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1254 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 1272 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1255 1273
1256 ret = iwl_mvm_mac_ctxt_changed(mvm, vif); 1274 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
1257 if (ret) 1275 if (ret)
1258 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 1276 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1259 1277
@@ -1333,10 +1351,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1333 iwl_mvm_remove_time_event(mvm, mvmvif, 1351 iwl_mvm_remove_time_event(mvm, mvmvif,
1334 &mvmvif->time_event_data); 1352 &mvmvif->time_event_data);
1335 iwl_mvm_sf_update(mvm, vif, false); 1353 iwl_mvm_sf_update(mvm, vif, false);
1336 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC)); 1354 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
1337 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | 1355 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
1338 BSS_CHANGED_QOS)) { 1356 BSS_CHANGED_QOS)) {
1339 ret = iwl_mvm_power_update_mac(mvm, vif); 1357 ret = iwl_mvm_power_update_mac(mvm);
1340 if (ret) 1358 if (ret)
1341 IWL_ERR(mvm, "failed to update power mode\n"); 1359 IWL_ERR(mvm, "failed to update power mode\n");
1342 } 1360 }
@@ -1347,16 +1365,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1347 } 1365 }
1348 1366
1349 if (changes & BSS_CHANGED_CQM) { 1367 if (changes & BSS_CHANGED_CQM) {
1350 IWL_DEBUG_MAC80211(mvm, "cqm info_changed"); 1368 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
1351 /* reset cqm events tracking */ 1369 /* reset cqm events tracking */
1352 mvmvif->bf_data.last_cqm_event = 0; 1370 mvmvif->bf_data.last_cqm_event = 0;
1353 ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC); 1371 if (mvmvif->bf_data.bf_enabled) {
1354 if (ret) 1372 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
1355 IWL_ERR(mvm, "failed to update CQM thresholds\n"); 1373 if (ret)
1374 IWL_ERR(mvm,
1375 "failed to update CQM thresholds\n");
1376 }
1356 } 1377 }
1357 1378
1358 if (changes & BSS_CHANGED_ARP_FILTER) { 1379 if (changes & BSS_CHANGED_ARP_FILTER) {
1359 IWL_DEBUG_MAC80211(mvm, "arp filter changed"); 1380 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
1360 iwl_mvm_configure_bcast_filter(mvm, vif); 1381 iwl_mvm_configure_bcast_filter(mvm, vif);
1361 } 1382 }
1362} 1383}
@@ -1402,7 +1423,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1402 mvmvif->ap_ibss_active = true; 1423 mvmvif->ap_ibss_active = true;
1403 1424
1404 /* power updated needs to be done before quotas */ 1425 /* power updated needs to be done before quotas */
1405 iwl_mvm_power_update_mac(mvm, vif); 1426 iwl_mvm_power_update_mac(mvm);
1406 1427
1407 ret = iwl_mvm_update_quotas(mvm, vif); 1428 ret = iwl_mvm_update_quotas(mvm, vif);
1408 if (ret) 1429 if (ret)
@@ -1410,7 +1431,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1410 1431
1411 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 1432 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
1412 if (vif->p2p && mvm->p2p_device_vif) 1433 if (vif->p2p && mvm->p2p_device_vif)
1413 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 1434 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
1414 1435
1415 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); 1436 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
1416 1437
@@ -1420,7 +1441,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
1420 return 0; 1441 return 0;
1421 1442
1422out_quota_failed: 1443out_quota_failed:
1423 iwl_mvm_power_update_mac(mvm, vif); 1444 iwl_mvm_power_update_mac(mvm);
1424 mvmvif->ap_ibss_active = false; 1445 mvmvif->ap_ibss_active = false;
1425 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); 1446 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
1426out_unbind: 1447out_unbind:
@@ -1450,13 +1471,13 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
1450 1471
1451 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 1472 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
1452 if (vif->p2p && mvm->p2p_device_vif) 1473 if (vif->p2p && mvm->p2p_device_vif)
1453 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif); 1474 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
1454 1475
1455 iwl_mvm_update_quotas(mvm, NULL); 1476 iwl_mvm_update_quotas(mvm, NULL);
1456 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); 1477 iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
1457 iwl_mvm_binding_remove_vif(mvm, vif); 1478 iwl_mvm_binding_remove_vif(mvm, vif);
1458 1479
1459 iwl_mvm_power_update_mac(mvm, vif); 1480 iwl_mvm_power_update_mac(mvm);
1460 1481
1461 iwl_mvm_mac_ctxt_remove(mvm, vif); 1482 iwl_mvm_mac_ctxt_remove(mvm, vif);
1462 1483
@@ -1477,7 +1498,7 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
1477 1498
1478 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 1499 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
1479 BSS_CHANGED_BANDWIDTH) && 1500 BSS_CHANGED_BANDWIDTH) &&
1480 iwl_mvm_mac_ctxt_changed(mvm, vif)) 1501 iwl_mvm_mac_ctxt_changed(mvm, vif, false))
1481 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 1502 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1482 1503
1483 /* Need to send a new beacon template to the FW */ 1504 /* Need to send a new beacon template to the FW */
@@ -1495,6 +1516,9 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
1495 1516
1496 mutex_lock(&mvm->mutex); 1517 mutex_lock(&mvm->mutex);
1497 1518
1519 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
1520 iwl_mvm_sched_scan_stop(mvm, true);
1521
1498 switch (vif->type) { 1522 switch (vif->type) {
1499 case NL80211_IFTYPE_STATION: 1523 case NL80211_IFTYPE_STATION:
1500 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 1524 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
@@ -1525,7 +1549,7 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
1525 1549
1526 switch (mvm->scan_status) { 1550 switch (mvm->scan_status) {
1527 case IWL_MVM_SCAN_SCHED: 1551 case IWL_MVM_SCAN_SCHED:
1528 ret = iwl_mvm_sched_scan_stop(mvm); 1552 ret = iwl_mvm_sched_scan_stop(mvm, true);
1529 if (ret) { 1553 if (ret) {
1530 ret = -EBUSY; 1554 ret = -EBUSY;
1531 goto out; 1555 goto out;
@@ -1697,6 +1721,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1697 ret = iwl_mvm_add_sta(mvm, vif, sta); 1721 ret = iwl_mvm_add_sta(mvm, vif, sta);
1698 } else if (old_state == IEEE80211_STA_NONE && 1722 } else if (old_state == IEEE80211_STA_NONE &&
1699 new_state == IEEE80211_STA_AUTH) { 1723 new_state == IEEE80211_STA_AUTH) {
1724 /*
1725 * EBS may be disabled due to previous failures reported by FW.
1726 * Reset EBS status here assuming environment has been changed.
1727 */
1728 mvm->last_ebs_successful = true;
1700 ret = 0; 1729 ret = 0;
1701 } else if (old_state == IEEE80211_STA_AUTH && 1730 } else if (old_state == IEEE80211_STA_AUTH &&
1702 new_state == IEEE80211_STA_ASSOC) { 1731 new_state == IEEE80211_STA_ASSOC) {
@@ -1708,14 +1737,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
1708 } else if (old_state == IEEE80211_STA_ASSOC && 1737 } else if (old_state == IEEE80211_STA_ASSOC &&
1709 new_state == IEEE80211_STA_AUTHORIZED) { 1738 new_state == IEEE80211_STA_AUTHORIZED) {
1710 /* enable beacon filtering */ 1739 /* enable beacon filtering */
1711 if (vif->bss_conf.dtim_period) 1740 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
1712 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
1713 CMD_SYNC));
1714 ret = 0; 1741 ret = 0;
1715 } else if (old_state == IEEE80211_STA_AUTHORIZED && 1742 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
1716 new_state == IEEE80211_STA_ASSOC) { 1743 new_state == IEEE80211_STA_ASSOC) {
1717 /* disable beacon filtering */ 1744 /* disable beacon filtering */
1718 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC)); 1745 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
1719 ret = 0; 1746 ret = 0;
1720 } else if (old_state == IEEE80211_STA_ASSOC && 1747 } else if (old_state == IEEE80211_STA_ASSOC &&
1721 new_state == IEEE80211_STA_AUTH) { 1748 new_state == IEEE80211_STA_AUTH) {
@@ -1772,7 +1799,7 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
1772 int ret; 1799 int ret;
1773 1800
1774 mutex_lock(&mvm->mutex); 1801 mutex_lock(&mvm->mutex);
1775 ret = iwl_mvm_mac_ctxt_changed(mvm, vif); 1802 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
1776 mutex_unlock(&mvm->mutex); 1803 mutex_unlock(&mvm->mutex);
1777 return ret; 1804 return ret;
1778 } 1805 }
@@ -1865,7 +1892,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
1865 int ret; 1892 int ret;
1866 1893
1867 mutex_lock(&mvm->mutex); 1894 mutex_lock(&mvm->mutex);
1868 ret = iwl_mvm_sched_scan_stop(mvm); 1895 ret = iwl_mvm_sched_scan_stop(mvm, false);
1869 mutex_unlock(&mvm->mutex); 1896 mutex_unlock(&mvm->mutex);
1870 iwl_mvm_wait_for_async_handlers(mvm); 1897 iwl_mvm_wait_for_async_handlers(mvm);
1871 1898
@@ -2161,10 +2188,10 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
2161 return; 2188 return;
2162 2189
2163 mutex_lock(&mvm->mutex); 2190 mutex_lock(&mvm->mutex);
2191 iwl_mvm_bt_coex_vif_change(mvm);
2164 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, 2192 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
2165 ctx->rx_chains_static, 2193 ctx->rx_chains_static,
2166 ctx->rx_chains_dynamic); 2194 ctx->rx_chains_dynamic);
2167 iwl_mvm_bt_coex_vif_change(mvm);
2168 mutex_unlock(&mvm->mutex); 2195 mutex_unlock(&mvm->mutex);
2169} 2196}
2170 2197
@@ -2184,6 +2211,11 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
2184 2211
2185 switch (vif->type) { 2212 switch (vif->type) {
2186 case NL80211_IFTYPE_AP: 2213 case NL80211_IFTYPE_AP:
2214 /* Unless it's a CSA flow we have nothing to do here */
2215 if (vif->csa_active) {
2216 mvmvif->ap_ibss_active = true;
2217 break;
2218 }
2187 case NL80211_IFTYPE_ADHOC: 2219 case NL80211_IFTYPE_ADHOC:
2188 /* 2220 /*
2189 * The AP binding flow is handled as part of the start_ap flow 2221 * The AP binding flow is handled as part of the start_ap flow
@@ -2207,7 +2239,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
2207 * Power state must be updated before quotas, 2239 * Power state must be updated before quotas,
2208 * otherwise fw will complain. 2240 * otherwise fw will complain.
2209 */ 2241 */
2210 iwl_mvm_power_update_mac(mvm, vif); 2242 iwl_mvm_power_update_mac(mvm);
2211 2243
2212 /* Setting the quota at this stage is only required for monitor 2244 /* Setting the quota at this stage is only required for monitor
2213 * interfaces. For the other types, the bss_info changed flow 2245 * interfaces. For the other types, the bss_info changed flow
@@ -2220,11 +2252,17 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
2220 goto out_remove_binding; 2252 goto out_remove_binding;
2221 } 2253 }
2222 2254
2255 /* Handle binding during CSA */
2256 if (vif->type == NL80211_IFTYPE_AP) {
2257 iwl_mvm_update_quotas(mvm, vif);
2258 iwl_mvm_mac_ctxt_changed(mvm, vif, false);
2259 }
2260
2223 goto out_unlock; 2261 goto out_unlock;
2224 2262
2225 out_remove_binding: 2263 out_remove_binding:
2226 iwl_mvm_binding_remove_vif(mvm, vif); 2264 iwl_mvm_binding_remove_vif(mvm, vif);
2227 iwl_mvm_power_update_mac(mvm, vif); 2265 iwl_mvm_power_update_mac(mvm);
2228 out_unlock: 2266 out_unlock:
2229 mutex_unlock(&mvm->mutex); 2267 mutex_unlock(&mvm->mutex);
2230 if (ret) 2268 if (ret)
@@ -2244,22 +2282,29 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
2244 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 2282 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
2245 2283
2246 switch (vif->type) { 2284 switch (vif->type) {
2247 case NL80211_IFTYPE_AP:
2248 case NL80211_IFTYPE_ADHOC: 2285 case NL80211_IFTYPE_ADHOC:
2249 goto out_unlock; 2286 goto out_unlock;
2250 case NL80211_IFTYPE_MONITOR: 2287 case NL80211_IFTYPE_MONITOR:
2251 mvmvif->monitor_active = false; 2288 mvmvif->monitor_active = false;
2252 iwl_mvm_update_quotas(mvm, NULL); 2289 iwl_mvm_update_quotas(mvm, NULL);
2253 break; 2290 break;
2291 case NL80211_IFTYPE_AP:
2292 /* This part is triggered only during CSA */
2293 if (!vif->csa_active || !mvmvif->ap_ibss_active)
2294 goto out_unlock;
2295
2296 mvmvif->ap_ibss_active = false;
2297 iwl_mvm_update_quotas(mvm, NULL);
2298 /*TODO: bt_coex notification here? */
2254 default: 2299 default:
2255 break; 2300 break;
2256 } 2301 }
2257 2302
2258 iwl_mvm_binding_remove_vif(mvm, vif); 2303 iwl_mvm_binding_remove_vif(mvm, vif);
2259 iwl_mvm_power_update_mac(mvm, vif);
2260 2304
2261out_unlock: 2305out_unlock:
2262 mvmvif->phy_ctxt = NULL; 2306 mvmvif->phy_ctxt = NULL;
2307 iwl_mvm_power_update_mac(mvm);
2263 mutex_unlock(&mvm->mutex); 2308 mutex_unlock(&mvm->mutex);
2264} 2309}
2265 2310
@@ -2323,9 +2368,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
2323 return -EINVAL; 2368 return -EINVAL;
2324 2369
2325 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 2370 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
2326 return iwl_mvm_enable_beacon_filter(mvm, vif, 2371 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2327 CMD_SYNC); 2372 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
2328 return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
2329 } 2373 }
2330 2374
2331 return -EOPNOTSUPP; 2375 return -EOPNOTSUPP;
@@ -2346,6 +2390,53 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
2346} 2390}
2347#endif 2391#endif
2348 2392
2393static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
2394 struct ieee80211_vif *vif,
2395 struct cfg80211_chan_def *chandef)
2396{
2397 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2398
2399 mutex_lock(&mvm->mutex);
2400 if (WARN(mvm->csa_vif && mvm->csa_vif->csa_active,
2401 "Another CSA is already in progress"))
2402 goto out_unlock;
2403
2404 IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
2405 chandef->center_freq1);
2406 mvm->csa_vif = vif;
2407
2408out_unlock:
2409 mutex_unlock(&mvm->mutex);
2410}
2411
2412static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
2413 struct ieee80211_vif *vif, u32 queues, bool drop)
2414{
2415 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2416 struct iwl_mvm_vif *mvmvif;
2417 struct iwl_mvm_sta *mvmsta;
2418
2419 if (!vif || vif->type != NL80211_IFTYPE_STATION)
2420 return;
2421
2422 mutex_lock(&mvm->mutex);
2423 mvmvif = iwl_mvm_vif_from_mac80211(vif);
2424 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
2425
2426 if (WARN_ON_ONCE(!mvmsta))
2427 goto done;
2428
2429 if (drop) {
2430 if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true))
2431 IWL_ERR(mvm, "flush request fail\n");
2432 } else {
2433 iwl_trans_wait_tx_queue_empty(mvm->trans,
2434 mvmsta->tfd_queue_msk);
2435 }
2436done:
2437 mutex_unlock(&mvm->mutex);
2438}
2439
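The new flush callback above implements mac80211's two flush modes for station interfaces: with drop set, pending frames are discarded via the firmware flush command; without it, the call simply blocks until the station's TFD queues drain. A compact model of that decision (function pointers stand in for the transport calls):

#include <stdbool.h>
#include <stdint.h>

static void flush_model(uint32_t tfd_queue_msk, bool drop,
			int (*discard)(uint32_t),	/* ~ iwl_mvm_flush_tx_path */
			void (*drain)(uint32_t))	/* ~ iwl_trans_wait_tx_queue_empty */
{
	if (drop) {
		if (discard(tfd_queue_msk))
			;	/* failure is only logged, not propagated */
	} else {
		drain(tfd_queue_msk);
	}
}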
2349const struct ieee80211_ops iwl_mvm_hw_ops = { 2440const struct ieee80211_ops iwl_mvm_hw_ops = {
2350 .tx = iwl_mvm_mac_tx, 2441 .tx = iwl_mvm_mac_tx,
2351 .ampdu_action = iwl_mvm_mac_ampdu_action, 2442 .ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -2369,6 +2460,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
2369 .sta_rc_update = iwl_mvm_sta_rc_update, 2460 .sta_rc_update = iwl_mvm_sta_rc_update,
2370 .conf_tx = iwl_mvm_mac_conf_tx, 2461 .conf_tx = iwl_mvm_mac_conf_tx,
2371 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 2462 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
2463 .flush = iwl_mvm_mac_flush,
2372 .sched_scan_start = iwl_mvm_mac_sched_scan_start, 2464 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
2373 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, 2465 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
2374 .set_key = iwl_mvm_mac_set_key, 2466 .set_key = iwl_mvm_mac_set_key,
@@ -2388,6 +2480,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
2388 2480
2389 .set_tim = iwl_mvm_set_tim, 2481 .set_tim = iwl_mvm_set_tim,
2390 2482
2483 .channel_switch_beacon = iwl_mvm_channel_switch_beacon,
2484
2391 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) 2485 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
2392 2486
2393#ifdef CONFIG_PM_SLEEP 2487#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index f1ec0986c3c9..fcc6c29482d0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
164 MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), 164 MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
165 MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), 165 MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
166 MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), 166 MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
167 MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
168 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), 167 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
169 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), 168 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
170 MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), 169 MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
177 u32 tx_data_timeout; 176 u32 tx_data_timeout;
178 bool skip_over_dtim; 177 bool skip_over_dtim;
179 u8 skip_dtim_periods; 178 u8 skip_dtim_periods;
180 bool disable_power_off;
181 bool lprx_ena; 179 bool lprx_ena;
182 u32 lprx_rssi_threshold; 180 u32 lprx_rssi_threshold;
183 bool snooze_ena; 181 bool snooze_ena;
@@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
232 IWL_MVM_REF_USER, 230 IWL_MVM_REF_USER,
233 IWL_MVM_REF_TX, 231 IWL_MVM_REF_TX,
234 IWL_MVM_REF_TX_AGG, 232 IWL_MVM_REF_TX_AGG,
233 IWL_MVM_REF_EXIT_WORK,
235 234
236 IWL_MVM_REF_COUNT, 235 IWL_MVM_REF_COUNT,
237}; 236};
@@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
265 * @uploaded: indicates the MAC context has been added to the device 264 * @uploaded: indicates the MAC context has been added to the device
266 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface 265 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
267 * should get quota etc. 266 * should get quota etc.
267 * @pm_enabled - Indicate if MAC power management is allowed
268 * @monitor_active: indicates that monitor context is configured, and that the 268 * @monitor_active: indicates that monitor context is configured, and that the
269 * interface should get quota etc. 269 * interface should get quota etc.
270 * @low_latency: indicates that this interface is in low-latency mode 270 * @low_latency: indicates that this interface is in low-latency mode
@@ -283,6 +283,7 @@ struct iwl_mvm_vif {
283 283
284 bool uploaded; 284 bool uploaded;
285 bool ap_ibss_active; 285 bool ap_ibss_active;
286 bool pm_enabled;
286 bool monitor_active; 287 bool monitor_active;
287 bool low_latency; 288 bool low_latency;
288 struct iwl_mvm_vif_bf_data bf_data; 289 struct iwl_mvm_vif_bf_data bf_data;
@@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
451 int last_frame_idx; 452 int last_frame_idx;
452}; 453};
453 454
455enum {
456 D0I3_DEFER_WAKEUP,
457 D0I3_PENDING_WAKEUP,
458};
459
454struct iwl_mvm { 460struct iwl_mvm {
455 /* for logger access */ 461 /* for logger access */
456 struct device *dev; 462 struct device *dev;
@@ -484,6 +490,7 @@ struct iwl_mvm {
484 u32 log_event_table; 490 u32 log_event_table;
485 u32 umac_error_event_table; 491 u32 umac_error_event_table;
486 bool support_umac_log; 492 bool support_umac_log;
493 struct iwl_sf_region sf_space;
487 494
488 u32 ampdu_ref; 495 u32 ampdu_ref;
489 496
@@ -495,6 +502,7 @@ struct iwl_mvm {
495 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; 502 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
496 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; 503 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
497 504
505 const char *nvm_file_name;
498 struct iwl_nvm_data *nvm_data; 506 struct iwl_nvm_data *nvm_data;
499 /* NVM sections */ 507 /* NVM sections */
500 struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; 508 struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@@ -535,6 +543,8 @@ struct iwl_mvm {
535 /* Internal station */ 543 /* Internal station */
536 struct iwl_mvm_int_sta aux_sta; 544 struct iwl_mvm_int_sta aux_sta;
537 545
546 bool last_ebs_successful;
547
538 u8 scan_last_antenna_idx; /* to toggle TX between antennas */ 548 u8 scan_last_antenna_idx; /* to toggle TX between antennas */
539 u8 mgmt_last_antenna_idx; 549 u8 mgmt_last_antenna_idx;
540 550
@@ -578,8 +588,12 @@ struct iwl_mvm {
578 void *fw_error_dump; 588 void *fw_error_dump;
579 void *fw_error_sram; 589 void *fw_error_sram;
580 u32 fw_error_sram_len; 590 u32 fw_error_sram_len;
591 u32 *fw_error_rxf;
592 u32 fw_error_rxf_len;
581 593
594#ifdef CONFIG_IWLWIFI_LEDS
582 struct led_classdev led; 595 struct led_classdev led;
596#endif
583 597
584 struct ieee80211_vif *p2p_device_vif; 598 struct ieee80211_vif *p2p_device_vif;
585 599
@@ -601,6 +615,9 @@ struct iwl_mvm {
601 bool d0i3_offloading; 615 bool d0i3_offloading;
602 struct work_struct d0i3_exit_work; 616 struct work_struct d0i3_exit_work;
603 struct sk_buff_head d0i3_tx; 617 struct sk_buff_head d0i3_tx;
618 /* protect d0i3_suspend_flags */
619 struct mutex d0i3_suspend_mutex;
620 unsigned long d0i3_suspend_flags;
604 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ 621 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
605 spinlock_t d0i3_tx_lock; 622 spinlock_t d0i3_tx_lock;
606 wait_queue_head_t d0i3_exit_waitq; 623 wait_queue_head_t d0i3_exit_waitq;
@@ -629,8 +646,8 @@ struct iwl_mvm {
629 646
630 /* Indicate if device power save is allowed */ 647 /* Indicate if device power save is allowed */
631 bool ps_disabled; 648 bool ps_disabled;
632 /* Indicate if device power management is allowed */ 649
633 bool pm_disabled; 650 struct ieee80211_vif *csa_vif;
634}; 651};
635 652
636/* Extract MVM priv from op_mode and _hw */ 653/* Extract MVM priv from op_mode and _hw */
@@ -705,6 +722,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
705#ifdef CONFIG_IWLWIFI_DEBUGFS 722#ifdef CONFIG_IWLWIFI_DEBUGFS
706void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); 723void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
707void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm); 724void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
725void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
708#endif 726#endif
709u8 first_antenna(u8 mask); 727u8 first_antenna(u8 mask);
710u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); 728u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -745,7 +763,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
745 struct iwl_device_cmd *cmd); 763 struct iwl_device_cmd *cmd);
746 764
747/* NVM */ 765/* NVM */
748int iwl_nvm_init(struct iwl_mvm *mvm); 766int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
749int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); 767int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
750 768
751int iwl_mvm_up(struct iwl_mvm *mvm); 769int iwl_mvm_up(struct iwl_mvm *mvm);
@@ -796,7 +814,8 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
796int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 814int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
797void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 815void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
798int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 816int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
799int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 817int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
818 bool force_assoc_off);
800int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 819int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
801u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, 820u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
802 struct ieee80211_vif *vif); 821 struct ieee80211_vif *vif);
@@ -840,7 +859,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
840 struct cfg80211_sched_scan_request *req); 859 struct cfg80211_sched_scan_request *req);
841int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, 860int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
842 struct cfg80211_sched_scan_request *req); 861 struct cfg80211_sched_scan_request *req);
843int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm); 862int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify);
844int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm, 863int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
845 struct iwl_rx_cmd_buffer *rxb, 864 struct iwl_rx_cmd_buffer *rxb,
846 struct iwl_device_cmd *cmd); 865 struct iwl_device_cmd *cmd);
@@ -874,10 +893,8 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
874int rs_pretty_print_rate(char *buf, const u32 rate); 893int rs_pretty_print_rate(char *buf, const u32 rate);
875 894
876/* power management */ 895/* power management */
877int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
878
879int iwl_mvm_power_update_device(struct iwl_mvm *mvm); 896int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
880int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 897int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
881int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 898int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
882 char *buf, int bufsz); 899 char *buf, int bufsz);
883 900
@@ -886,8 +903,18 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
886 struct iwl_rx_cmd_buffer *rxb, 903 struct iwl_rx_cmd_buffer *rxb,
887 struct iwl_device_cmd *cmd); 904 struct iwl_device_cmd *cmd);
888 905
906#ifdef CONFIG_IWLWIFI_LEDS
889int iwl_mvm_leds_init(struct iwl_mvm *mvm); 907int iwl_mvm_leds_init(struct iwl_mvm *mvm);
890void iwl_mvm_leds_exit(struct iwl_mvm *mvm); 908void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
909#else
910static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
911{
912 return 0;
913}
914static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
915{
916}
917#endif
891 918
892/* D3 (WoWLAN, NetDetect) */ 919/* D3 (WoWLAN, NetDetect) */
893int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); 920int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
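The #ifdef block above is the usual compile-out idiom: when CONFIG_IWLWIFI_LEDS is not set, the init/exit entry points collapse to no-op static inlines so callers need no conditional compilation of their own. The same shape, reduced to a generic template:

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_init(void);
void example_feature_exit(void);
#else
static inline int example_feature_init(void)
{
	return 0;	/* pretend success so callers stay unconditional */
}
static inline void example_feature_exit(void)
{
}
#endif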
@@ -922,9 +949,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
922void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); 949void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
923void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); 950void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
924void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); 951void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
952int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
925 953
926/* BT Coex */ 954/* BT Coex */
927int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
928int iwl_send_bt_init_conf(struct iwl_mvm *mvm); 955int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
929int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, 956int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
930 struct iwl_rx_cmd_buffer *rxb, 957 struct iwl_rx_cmd_buffer *rxb,
@@ -936,9 +963,10 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
936 struct ieee80211_sta *sta); 963 struct ieee80211_sta *sta);
937bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, 964bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
938 struct ieee80211_sta *sta); 965 struct ieee80211_sta *sta);
966bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
967 enum ieee80211_band band);
939u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, 968u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
940 struct ieee80211_tx_info *info, u8 ac); 969 struct ieee80211_tx_info *info, u8 ac);
941int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
942 970
943enum iwl_bt_kill_msk { 971enum iwl_bt_kill_msk {
944 BT_KILL_MSK_DEFAULT, 972 BT_KILL_MSK_DEFAULT,
@@ -969,17 +997,11 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
969int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 997int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
970 struct ieee80211_vif *vif, 998 struct ieee80211_vif *vif,
971 u32 flags); 999 u32 flags);
972int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
973 struct ieee80211_vif *vif, bool enable);
974int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
975 struct ieee80211_vif *vif,
976 bool force,
977 u32 flags);
978
979/* SMPS */ 1000/* SMPS */
980void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1001void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
981 enum iwl_mvm_smps_type_request req_type, 1002 enum iwl_mvm_smps_type_request req_type,
982 enum ieee80211_smps_mode smps_request); 1003 enum ieee80211_smps_mode smps_request);
1004bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
983 1005
984/* Low latency */ 1006/* Low latency */
985int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1007int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index cf2d09f53782..808f78f6fbf9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -74,6 +74,12 @@
74#define NVM_WRITE_OPCODE 1 74#define NVM_WRITE_OPCODE 1
75#define NVM_READ_OPCODE 0 75#define NVM_READ_OPCODE 0
76 76
77/* load nvm chunk response */
78enum {
79 READ_NVM_CHUNK_SUCCEED = 0,
80 READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
81};
82
77/* 83/*
78 * prepare the NVM host command w/ the pointers to the nvm buffer 84 * prepare the NVM host command w/ the pointers to the nvm buffer
79 * and send it to fw 85 * and send it to fw
@@ -90,7 +96,7 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
90 struct iwl_host_cmd cmd = { 96 struct iwl_host_cmd cmd = {
91 .id = NVM_ACCESS_CMD, 97 .id = NVM_ACCESS_CMD,
92 .len = { sizeof(struct iwl_nvm_access_cmd), length }, 98 .len = { sizeof(struct iwl_nvm_access_cmd), length },
93 .flags = CMD_SYNC | CMD_SEND_IN_RFKILL, 99 .flags = CMD_SEND_IN_RFKILL,
94 .data = { &nvm_access_cmd, data }, 100 .data = { &nvm_access_cmd, data },
95 /* data may come from vmalloc, so use _DUP */ 101 /* data may come from vmalloc, so use _DUP */
96 .dataflags = { 0, IWL_HCMD_DFL_DUP }, 102 .dataflags = { 0, IWL_HCMD_DFL_DUP },
@@ -112,7 +118,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
112 struct iwl_rx_packet *pkt; 118 struct iwl_rx_packet *pkt;
113 struct iwl_host_cmd cmd = { 119 struct iwl_host_cmd cmd = {
114 .id = NVM_ACCESS_CMD, 120 .id = NVM_ACCESS_CMD,
115 .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL, 121 .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
116 .data = { &nvm_access_cmd, }, 122 .data = { &nvm_access_cmd, },
117 }; 123 };
118 int ret, bytes_read, offset_read; 124 int ret, bytes_read, offset_read;
@@ -139,10 +145,26 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
139 offset_read = le16_to_cpu(nvm_resp->offset); 145 offset_read = le16_to_cpu(nvm_resp->offset);
140 resp_data = nvm_resp->data; 146 resp_data = nvm_resp->data;
141 if (ret) { 147 if (ret) {
142 IWL_ERR(mvm, 148 if ((offset != 0) &&
143 "NVM access command failed with status %d (device: %s)\n", 149 (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
144 ret, mvm->cfg->name); 150 /*
145 ret = -EINVAL; 151 * meaning of NOT_VALID_ADDRESS:
152 * the driver tried to read a chunk from an address
153 * that is a multiple of 2K and got an error since the
154 * address is empty. meaning of (offset != 0): the driver
155 * already read valid data from another chunk, so this case
156 * is not an error.
157 */
158 IWL_DEBUG_EEPROM(mvm->trans->dev,
159 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
160 offset);
161 ret = 0;
162 } else {
163 IWL_DEBUG_EEPROM(mvm->trans->dev,
164 "NVM access command failed with status %d (device: %s)\n",
165 ret, mvm->cfg->name);
166 ret = -EIO;
167 }
146 goto exit; 168 goto exit;
147 } 169 }
148 170
@@ -211,9 +233,9 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
211 while (ret == length) { 233 while (ret == length) {
212 ret = iwl_nvm_read_chunk(mvm, section, offset, length, data); 234 ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
213 if (ret < 0) { 235 if (ret < 0) {
214 IWL_ERR(mvm, 236 IWL_DEBUG_EEPROM(mvm->trans->dev,
215 "Cannot read NVM from section %d offset %d, length %d\n", 237 "Cannot read NVM from section %d offset %d, length %d\n",
216 section, offset, length); 238 section, offset, length);
217 return ret; 239 return ret;
218 } 240 }
219 offset += ret; 241 offset += ret;
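Both NVM hunks above implement the same chunked-read contract: request fixed-size chunks until the device returns a short one, and (new in this patch) treat an "invalid address" reply at a non-zero offset as a clean end of section rather than a failure. A rough sketch of the caller's loop, with a hypothetical read_chunk() standing in for iwl_nvm_read_chunk():

static int read_section(struct iwl_mvm *mvm, u16 section, u8 *data, int chunk)
{
	int offset = 0;
	int ret = chunk;	/* prime the loop: "last chunk was full" */

	while (ret == chunk) {
		ret = read_chunk(mvm, section, offset, chunk, data + offset);
		if (ret < 0)
			return ret;	/* hard error */
		offset += ret;		/* short (or zero) read ends the loop */
	}
	return offset;			/* total bytes in this section */
}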
@@ -238,13 +260,20 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
238 return NULL; 260 return NULL;
239 } 261 }
240 } else { 262 } else {
263 /* SW and REGULATORY sections are mandatory */
241 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || 264 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
242 !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
243 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) { 265 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
244 IWL_ERR(mvm, 266 IWL_ERR(mvm,
245 "Can't parse empty family 8000 NVM sections\n"); 267 "Can't parse empty family 8000 NVM sections\n");
246 return NULL; 268 return NULL;
247 } 269 }
270 /* MAC_OVERRIDE or at least HW section must exist */
271 if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
272 !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
273 IWL_ERR(mvm,
274 "Can't parse mac_address, empty sections\n");
275 return NULL;
276 }
248 } 277 }
249 278
250 if (WARN_ON(!mvm->cfg)) 279 if (WARN_ON(!mvm->cfg))
@@ -311,16 +340,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
311 * get here after that we assume the NVM request can be satisfied 340 * get here after that we assume the NVM request can be satisfied
312 * synchronously. 341 * synchronously.
313 */ 342 */
314 ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, 343 ret = request_firmware(&fw_entry, mvm->nvm_file_name,
315 mvm->trans->dev); 344 mvm->trans->dev);
316 if (ret) { 345 if (ret) {
317 IWL_ERR(mvm, "ERROR: %s isn't available %d\n", 346 IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
318 iwlwifi_mod_params.nvm_file, ret); 347 mvm->nvm_file_name, ret);
319 return ret; 348 return ret;
320 } 349 }
321 350
322 IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", 351 IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
323 iwlwifi_mod_params.nvm_file, fw_entry->size); 352 mvm->nvm_file_name, fw_entry->size);
324 353
325 if (fw_entry->size < sizeof(*file_sec)) { 354 if (fw_entry->size < sizeof(*file_sec)) {
326 IWL_ERR(mvm, "NVM file too small\n"); 355 IWL_ERR(mvm, "NVM file too small\n");
@@ -427,53 +456,28 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
427 return ret; 456 return ret;
428} 457}
429 458
430int iwl_nvm_init(struct iwl_mvm *mvm) 459int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
431{ 460{
432 int ret, i, section; 461 int ret, section;
433 u8 *nvm_buffer, *temp; 462 u8 *nvm_buffer, *temp;
434 int nvm_to_read[NVM_MAX_NUM_SECTIONS];
435 int num_of_sections_to_read;
436 463
437 if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) 464 if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
438 return -EINVAL; 465 return -EINVAL;
439 466
440 /* load external NVM if configured */ 467 /* load NVM values from nic */
441 if (iwlwifi_mod_params.nvm_file) { 468 if (read_nvm_from_nic) {
442 /* move to External NVM flow */
443 ret = iwl_mvm_read_external_nvm(mvm);
444 if (ret)
445 return ret;
446 } else {
447 /* list of NVM sections we are allowed/need to read */
448 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
449 nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
450 nvm_to_read[1] = NVM_SECTION_TYPE_SW;
451 nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
452 nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
453 num_of_sections_to_read = 4;
454 } else {
455 nvm_to_read[0] = NVM_SECTION_TYPE_SW;
456 nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
457 nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
458 nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
459 nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
460 num_of_sections_to_read = 5;
461 }
462
463 /* Read From FW NVM */ 469 /* Read From FW NVM */
464 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); 470 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
465 471
466 /* TODO: find correct NVM max size for a section */
467 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, 472 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
468 GFP_KERNEL); 473 GFP_KERNEL);
469 if (!nvm_buffer) 474 if (!nvm_buffer)
470 return -ENOMEM; 475 return -ENOMEM;
471 for (i = 0; i < num_of_sections_to_read; i++) { 476 for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
472 section = nvm_to_read[i];
473 /* we override the constness for initial read */ 477 /* we override the constness for initial read */
474 ret = iwl_nvm_read_section(mvm, section, nvm_buffer); 478 ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
475 if (ret < 0) 479 if (ret < 0)
476 break; 480 continue;
477 temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); 481 temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
478 if (!temp) { 482 if (!temp) {
479 ret = -ENOMEM; 483 ret = -ENOMEM;
@@ -502,15 +506,21 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
502 mvm->nvm_hw_blob.size = ret; 506 mvm->nvm_hw_blob.size = ret;
503 break; 507 break;
504 } 508 }
505 WARN(1, "section: %d", section);
506 } 509 }
507#endif 510#endif
508 } 511 }
509 kfree(nvm_buffer); 512 kfree(nvm_buffer);
510 if (ret < 0) 513 }
514
515 /* load external NVM if configured */
516 if (mvm->nvm_file_name) {
517 /* move to External NVM flow */
518 ret = iwl_mvm_read_external_nvm(mvm);
519 if (ret)
511 return ret; 520 return ret;
512 } 521 }
513 522
523 /* parse the relevant nvm sections */
514 mvm->nvm_data = iwl_parse_nvm_sections(mvm); 524 mvm->nvm_data = iwl_parse_nvm_sections(mvm);
515 if (!mvm->nvm_data) 525 if (!mvm->nvm_data)
516 return -ENODATA; 526 return -ENODATA;
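The restructured iwl_nvm_init() above establishes a clear precedence: optionally read every section from the NIC's OTP first, then let an external NVM file override what it contains, and only then parse. A compact sketch of that flow, with hypothetical load_from_nic()/load_from_file()/parse_sections() helpers that each fill or consume mvm->nvm_sections:

int nvm_init_sketch(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
	int ret;

	if (read_nvm_from_nic) {		/* OTP is the baseline... */
		ret = load_from_nic(mvm);
		if (ret)
			return ret;
	}
	if (mvm->nvm_file_name) {		/* ...the file overrides it */
		ret = load_from_file(mvm);
		if (ret)
			return ret;
	}
	return parse_sections(mvm);		/* parse the merged view */
}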
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 9545d7fdd4bf..cc2f7de396de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -79,8 +79,8 @@
79#include "iwl-prph.h" 79#include "iwl-prph.h"
80#include "rs.h" 80#include "rs.h"
81#include "fw-api-scan.h" 81#include "fw-api-scan.h"
82#include "fw-error-dump.h"
83#include "time-event.h" 82#include "time-event.h"
83#include "iwl-fw-error-dump.h"
84 84
85/* 85/*
86 * module name, copyright, version, etc. 86 * module name, copyright, version, etc.
@@ -220,7 +220,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
220 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), 220 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
221 221
222 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), 222 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
223 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false), 223 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
224 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), 224 RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
225 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, 225 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
226 iwl_mvm_rx_ant_coupling_notif, true), 226 iwl_mvm_rx_ant_coupling_notif, true),
@@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
402 mvm->sf_state = SF_UNINIT; 402 mvm->sf_state = SF_UNINIT;
403 403
404 mutex_init(&mvm->mutex); 404 mutex_init(&mvm->mutex);
405 mutex_init(&mvm->d0i3_suspend_mutex);
405 spin_lock_init(&mvm->async_handlers_lock); 406 spin_lock_init(&mvm->async_handlers_lock);
406 INIT_LIST_HEAD(&mvm->time_event_list); 407 INIT_LIST_HEAD(&mvm->time_event_list);
407 INIT_LIST_HEAD(&mvm->async_handlers_list); 408 INIT_LIST_HEAD(&mvm->async_handlers_list);
@@ -465,13 +466,24 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
465 466
466 min_backoff = calc_min_backoff(trans, cfg); 467 min_backoff = calc_min_backoff(trans, cfg);
467 iwl_mvm_tt_initialize(mvm, min_backoff); 468 iwl_mvm_tt_initialize(mvm, min_backoff);
469 /* set the nvm_file_name according to priority */
470 if (iwlwifi_mod_params.nvm_file)
471 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
472 else
473 mvm->nvm_file_name = mvm->cfg->default_nvm_file;
474
475 if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
476 "not allowing power-up and not having nvm_file\n"))
477 goto out_free;
468 478
469 /* 479 /*
470 * If the NVM exists in an external file, 480 * Even if the NVM exists in the nvm_file, the driver should read the NVM
471 * there is no need to unnecessarily power up the NIC at driver load 481 * again from the NIC because there might be entries that exist in the OTP
482 * and not in the file.
483 * For NICs with no_power_up_nic_in_init: rely completely on nvm_file.
472 */ 484 */
473 if (iwlwifi_mod_params.nvm_file) { 485 if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
474 err = iwl_nvm_init(mvm); 486 err = iwl_nvm_init(mvm, false);
475 if (err) 487 if (err)
476 goto out_free; 488 goto out_free;
477 } else { 489 } else {
@@ -518,7 +530,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
518 out_free: 530 out_free:
519 iwl_phy_db_free(mvm->phy_db); 531 iwl_phy_db_free(mvm->phy_db);
520 kfree(mvm->scan_cmd); 532 kfree(mvm->scan_cmd);
521 if (!iwlwifi_mod_params.nvm_file) 533 if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
522 iwl_trans_op_mode_leave(trans); 534 iwl_trans_op_mode_leave(trans);
523 ieee80211_free_hw(mvm->hw); 535 ieee80211_free_hw(mvm->hw);
524 return NULL; 536 return NULL;
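The nvm_file_name selection above is a plain "module parameter beats per-device default" rule; using the GNU ?: extension common in kernel code it is equivalent to:

	mvm->nvm_file_name = iwlwifi_mod_params.nvm_file ?:
			     mvm->cfg->default_nvm_file;

The WARN that follows then rejects configurations that forbid powering up the NIC during init (no_power_up_nic_in_init) but provide no NVM file to read from instead.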
@@ -538,6 +550,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
538 kfree(mvm->scan_cmd); 550 kfree(mvm->scan_cmd);
539 vfree(mvm->fw_error_dump); 551 vfree(mvm->fw_error_dump);
540 kfree(mvm->fw_error_sram); 552 kfree(mvm->fw_error_sram);
553 kfree(mvm->fw_error_rxf);
541 kfree(mvm->mcast_filter_cmd); 554 kfree(mvm->mcast_filter_cmd);
542 mvm->mcast_filter_cmd = NULL; 555 mvm->mcast_filter_cmd = NULL;
543 556
@@ -814,6 +827,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
814 struct iwl_fw_error_dump_file *dump_file; 827 struct iwl_fw_error_dump_file *dump_file;
815 struct iwl_fw_error_dump_data *dump_data; 828 struct iwl_fw_error_dump_data *dump_data;
816 u32 file_len; 829 u32 file_len;
830 u32 trans_len;
817 831
818 lockdep_assert_held(&mvm->mutex); 832 lockdep_assert_held(&mvm->mutex);
819 833
@@ -821,8 +835,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
821 return; 835 return;
822 836
823 file_len = mvm->fw_error_sram_len + 837 file_len = mvm->fw_error_sram_len +
838 mvm->fw_error_rxf_len +
824 sizeof(*dump_file) + 839 sizeof(*dump_file) +
825 sizeof(*dump_data); 840 sizeof(*dump_data) * 2;
841
842 trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
843 if (trans_len)
844 file_len += trans_len;
826 845
827 dump_file = vmalloc(file_len); 846 dump_file = vmalloc(file_len);
828 if (!dump_file) 847 if (!dump_file)
@@ -833,7 +852,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
833 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); 852 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
834 dump_file->file_len = cpu_to_le32(file_len); 853 dump_file->file_len = cpu_to_le32(file_len);
835 dump_data = (void *)dump_file->data; 854 dump_data = (void *)dump_file->data;
836 dump_data->type = IWL_FW_ERROR_DUMP_SRAM; 855 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
856 dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
857 memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
858
859 dump_data = iwl_mvm_fw_error_next_data(dump_data);
860 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
837 dump_data->len = cpu_to_le32(mvm->fw_error_sram_len); 861 dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
838 862
839 /* 863 /*
@@ -842,6 +866,23 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
842 * mvm->fw_error_sram right now. 866 * mvm->fw_error_sram right now.
843 */ 867 */
844 memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len); 868 memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
869
870 kfree(mvm->fw_error_rxf);
871 mvm->fw_error_rxf = NULL;
872 mvm->fw_error_rxf_len = 0;
873
874 kfree(mvm->fw_error_sram);
875 mvm->fw_error_sram = NULL;
876 mvm->fw_error_sram_len = 0;
877
878 if (trans_len) {
879 void *buf = iwl_mvm_fw_error_next_data(dump_data);
880 u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
881 trans_len);
882 dump_data = (void *)((u8 *)buf + real_trans_len);
883 dump_file->file_len =
884 cpu_to_le32(file_len - trans_len + real_trans_len);
885 }
845} 886}
846#endif 887#endif
847 888
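The dump file assembled above is a flat sequence of { type, len, data[] } records, and iwl_mvm_fw_error_next_data() simply steps over one record to reach the next. A minimal sketch of the layout and the walker (field names mirroring the hunk):

struct dump_data {
	__le32 type;
	__le32 len;
	u8 data[];
};

/* advance past the current record: flexible payload follows the header */
static inline struct dump_data *next_data(struct dump_data *d)
{
	return (void *)(d->data + le32_to_cpu(d->len));
}

Filling the file is then "write header, write payload, advance": the RXF record, then the SRAM record, then, if the transport has anything to contribute, its dump appended last with file_len corrected to the bytes actually written.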
@@ -853,6 +894,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
853 894
854#ifdef CONFIG_IWLWIFI_DEBUGFS 895#ifdef CONFIG_IWLWIFI_DEBUGFS
855 iwl_mvm_fw_error_sram_dump(mvm); 896 iwl_mvm_fw_error_sram_dump(mvm);
897 iwl_mvm_fw_error_rxf_dump(mvm);
856#endif 898#endif
857 899
858 iwl_mvm_nic_restart(mvm); 900 iwl_mvm_nic_restart(mvm);
@@ -1126,9 +1168,9 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1126 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work); 1168 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1127 struct iwl_host_cmd get_status_cmd = { 1169 struct iwl_host_cmd get_status_cmd = {
1128 .id = WOWLAN_GET_STATUSES, 1170 .id = WOWLAN_GET_STATUSES,
1129 .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB, 1171 .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
1130 }; 1172 };
1131 struct iwl_wowlan_status_v6 *status; 1173 struct iwl_wowlan_status *status;
1132 int ret; 1174 int ret;
1133 u32 disconnection_reasons, wakeup_reasons; 1175 u32 disconnection_reasons, wakeup_reasons;
1134 __le16 *qos_seq = NULL; 1176 __le16 *qos_seq = NULL;
@@ -1158,18 +1200,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1158 iwl_free_resp(&get_status_cmd); 1200 iwl_free_resp(&get_status_cmd);
1159out: 1201out:
1160 iwl_mvm_d0i3_enable_tx(mvm, qos_seq); 1202 iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1203 iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
1161 mutex_unlock(&mvm->mutex); 1204 mutex_unlock(&mvm->mutex);
1162} 1205}
1163 1206
1164static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) 1207int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
1165{ 1208{
1166 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1167 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | 1209 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
1168 CMD_WAKE_UP_TRANS; 1210 CMD_WAKE_UP_TRANS;
1169 int ret; 1211 int ret;
1170 1212
1171 IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); 1213 IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
1172 1214
1215 mutex_lock(&mvm->d0i3_suspend_mutex);
1216 if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
1217 IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
1218 __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
1219 mutex_unlock(&mvm->d0i3_suspend_mutex);
1220 return 0;
1221 }
1222 mutex_unlock(&mvm->d0i3_suspend_mutex);
1223
1173 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); 1224 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
1174 if (ret) 1225 if (ret)
1175 goto out; 1226 goto out;
@@ -1183,6 +1234,25 @@ out:
1183 return ret; 1234 return ret;
1184} 1235}
1185 1236
1237static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1238{
1239 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1240
1241 iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
1242 return _iwl_mvm_exit_d0i3(mvm);
1243}
1244
1245static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
1246 struct napi_struct *napi,
1247 struct net_device *napi_dev,
1248 int (*poll)(struct napi_struct *, int),
1249 int weight)
1250{
1251 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1252
1253 ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
1254}
1255
1186static const struct iwl_op_mode_ops iwl_mvm_ops = { 1256static const struct iwl_op_mode_ops iwl_mvm_ops = {
1187 .start = iwl_op_mode_mvm_start, 1257 .start = iwl_op_mode_mvm_start,
1188 .stop = iwl_op_mode_mvm_stop, 1258 .stop = iwl_op_mode_mvm_stop,
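The deferral added above is a small "wake me later" handshake: if suspend set D0I3_DEFER_WAKEUP, the exit path only records a pending wakeup and returns, and the resume path is expected to notice the pending bit and perform the real exit. A sketch of both sides under the same mutex; the resume side is the assumed counterpart, not shown in this hunk:

	/* exit path (as in _iwl_mvm_exit_d0i3 above) */
	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;		/* resume will finish the exit */
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	/* resume side (assumed): run the deferred exit if one is pending */
	bool pending;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
	pending = test_and_clear_bit(D0I3_PENDING_WAKEUP,
				     &mvm->d0i3_suspend_flags);
	mutex_unlock(&mvm->d0i3_suspend_mutex);
	if (pending)
		_iwl_mvm_exit_d0i3(mvm);	/* lock released first */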
@@ -1196,4 +1266,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
1196 .nic_config = iwl_mvm_nic_config, 1266 .nic_config = iwl_mvm_nic_config,
1197 .enter_d0i3 = iwl_mvm_enter_d0i3, 1267 .enter_d0i3 = iwl_mvm_enter_d0i3,
1198 .exit_d0i3 = iwl_mvm_exit_d0i3, 1268 .exit_d0i3 = iwl_mvm_exit_d0i3,
1269 .napi_add = iwl_mvm_napi_add,
1199}; 1270};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 237efe0ac1c4..539f3a942d43 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -156,6 +156,18 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
156 idle_cnt = chains_static; 156 idle_cnt = chains_static;
157 active_cnt = chains_dynamic; 157 active_cnt = chains_dynamic;
158 158
159 /* In scenarios where we only ever use single-stream rates,
160 * i.e. legacy 11b/g/a associations, single-stream APs or even
161 * static SMPS, enable both chains to get diversity, improving
162 * the case where we're far enough from the AP that attenuation
163 * between the two antennas is sufficiently different to impact
164 * performance.
165 */
166 if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
167 idle_cnt = 2;
168 active_cnt = 2;
169 }
170
159 cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant << 171 cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
160 PHY_RX_CHAIN_VALID_POS); 172 PHY_RX_CHAIN_VALID_POS);
161 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); 173 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
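The override above is the whole trick: whenever the configuration would otherwise run a single RX chain and the new iwl_mvm_rx_diversity_allowed() predicate (declared in the mvm.h hunk earlier in this diff) permits it, both chains are enabled. For reference, the chain counts then end up packed into one le32 next to the valid-antenna mask, a restatement of the unchanged lines that follow:

	u32 rxchain = mvm->fw->valid_rx_ant << PHY_RX_CHAIN_VALID_POS;

	rxchain |= idle_cnt << PHY_RX_CHAIN_CNT_POS;	/* 2 when forced above */
	cmd->rxchain_info = cpu_to_le32(rxchain);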
@@ -187,7 +199,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
187 iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, 199 iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
188 chains_static, chains_dynamic); 200 chains_static, chains_dynamic);
189 201
190 ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC, 202 ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
191 sizeof(struct iwl_phy_context_cmd), 203 sizeof(struct iwl_phy_context_cmd),
192 &cmd); 204 &cmd);
193 if (ret) 205 if (ret)
@@ -202,18 +214,15 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
202 struct cfg80211_chan_def *chandef, 214 struct cfg80211_chan_def *chandef,
203 u8 chains_static, u8 chains_dynamic) 215 u8 chains_static, u8 chains_dynamic)
204{ 216{
205 int ret;
206
207 WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 217 WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
208 ctxt->ref); 218 ctxt->ref);
209 lockdep_assert_held(&mvm->mutex); 219 lockdep_assert_held(&mvm->mutex);
210 220
211 ctxt->channel = chandef->chan; 221 ctxt->channel = chandef->chan;
212 ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
213 chains_static, chains_dynamic,
214 FW_CTXT_ACTION_ADD, 0);
215 222
216 return ret; 223 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
224 chains_static, chains_dynamic,
225 FW_CTXT_ACTION_ADD, 0);
217} 226}
218 227
219/* 228/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 6b636eab3339..c182a8baf685 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -123,28 +123,6 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
123 cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); 123 cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
124} 124}
125 125
126int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
127 struct ieee80211_vif *vif, bool enable)
128{
129 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
130 struct iwl_beacon_filter_cmd cmd = {
131 IWL_BF_CMD_CONFIG_DEFAULTS,
132 .bf_enable_beacon_filter = cpu_to_le32(1),
133 .ba_enable_beacon_abort = cpu_to_le32(enable),
134 };
135
136 if (!mvmvif->bf_data.bf_enabled)
137 return 0;
138
139 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
140 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
141
142 mvmvif->bf_data.ba_enabled = enable;
143 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
144 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
145 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
146}
147
148static void iwl_mvm_power_log(struct iwl_mvm *mvm, 126static void iwl_mvm_power_log(struct iwl_mvm *mvm,
149 struct iwl_mac_power_cmd *cmd) 127 struct iwl_mac_power_cmd *cmd)
150{ 128{
@@ -268,6 +246,57 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
268 IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; 246 IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
269} 247}
270 248
249static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
250 struct ieee80211_vif *vif)
251{
252 unsigned long *data = _data;
253 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
254
255 if (!mvmvif->phy_ctxt)
256 return;
257
258 if (vif->type == NL80211_IFTYPE_STATION ||
259 vif->type == NL80211_IFTYPE_AP)
260 __set_bit(mvmvif->phy_ctxt->id, data);
261}
262
263static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
264 struct ieee80211_vif *vif)
265{
266 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
267 unsigned long phy_ctxt_counter = 0;
268
269 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
270 IEEE80211_IFACE_ITER_NORMAL,
271 iwl_mvm_binding_iterator,
272 &phy_ctxt_counter);
273
274 if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
275 ETH_ALEN))
276 return false;
277
278 if (vif->p2p &&
279 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
280 return false;
281 /*
282 * Avoid using uAPSD if the P2P client is associated with a GO
283 * that uses opportunistic power save, due to a current FW limitation.
284 */
285 if (vif->p2p &&
286 (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
287 IEEE80211_P2P_OPPPS_ENABLE_BIT))
288 return false;
289
290 /*
291 * Avoid using uAPSD if the client is in DCM -
292 * a low-latency issue in Miracast
293 */
294 if (hweight8(phy_ctxt_counter) >= 2)
295 return false;
296
297 return true;
298}
299
271static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, 300static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
272 struct ieee80211_vif *vif, 301 struct ieee80211_vif *vif,
273 struct iwl_mac_power_cmd *cmd) 302 struct iwl_mac_power_cmd *cmd)
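iwl_mvm_power_allow_uapsd() above detects DCM (two client interfaces on different channels) with a bitmap trick: the interface iterator sets one bit per bound PHY context id, and hweight8() then counts how many distinct contexts are in use. The core of it:

	unsigned long phy_ctxt_counter = 0;

	/* iterator body: one bit per PHY context used by a STA/AP vif */
	__set_bit(mvmvif->phy_ctxt->id, &phy_ctxt_counter);

	/* afterwards: two or more distinct contexts = two channels (DCM) */
	if (hweight8(phy_ctxt_counter) >= 2)
		return false;	/* uAPSD off: low-latency issue in Miracast */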
@@ -280,7 +309,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
280 bool radar_detect = false; 309 bool radar_detect = false;
281 struct iwl_mvm_vif *mvmvif __maybe_unused = 310 struct iwl_mvm_vif *mvmvif __maybe_unused =
282 iwl_mvm_vif_from_mac80211(vif); 311 iwl_mvm_vif_from_mac80211(vif);
283 bool allow_uapsd = true;
284 312
285 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 313 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
286 mvmvif->color)); 314 mvmvif->color));
@@ -303,13 +331,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
303 331
304 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 332 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
305 333
306#ifdef CONFIG_IWLWIFI_DEBUGFS
307 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
308 mvmvif->dbgfs_pm.disable_power_off)
309 cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
310#endif
311 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) || 334 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
312 mvm->pm_disabled) 335 !mvmvif->pm_enabled)
313 return; 336 return;
314 337
315 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); 338 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -351,23 +374,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
351 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); 374 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
352 } 375 }
353 376
354 if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, 377 if (iwl_mvm_power_allow_uapsd(mvm, vif))
355 ETH_ALEN))
356 allow_uapsd = false;
357
358 if (vif->p2p &&
359 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
360 allow_uapsd = false;
361 /*
362 * Avoid using uAPSD if P2P client is associated to GO that uses
363 * opportunistic power save. This is due to current FW limitation.
364 */
365 if (vif->p2p &&
366 vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
367 IEEE80211_P2P_OPPPS_ENABLE_BIT)
368 allow_uapsd = false;
369
370 if (allow_uapsd)
371 iwl_mvm_power_configure_uapsd(mvm, vif, cmd); 378 iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
372 379
373#ifdef CONFIG_IWLWIFI_DEBUGFS 380#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -421,20 +428,13 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
421{ 428{
422 struct iwl_mac_power_cmd cmd = {}; 429 struct iwl_mac_power_cmd cmd = {};
423 430
424 if (vif->type != NL80211_IFTYPE_STATION)
425 return 0;
426
427 if (vif->p2p &&
428 !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
429 return 0;
430
431 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 431 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
432 iwl_mvm_power_log(mvm, &cmd); 432 iwl_mvm_power_log(mvm, &cmd);
433#ifdef CONFIG_IWLWIFI_DEBUGFS 433#ifdef CONFIG_IWLWIFI_DEBUGFS
434 memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); 434 memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
435#endif 435#endif
436 436
437 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC, 437 return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
438 sizeof(cmd), &cmd); 438 sizeof(cmd), &cmd);
439} 439}
440 440
@@ -444,12 +444,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
444 .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK), 444 .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
445 }; 445 };
446 446
447 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
448 return 0;
449
450 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
451 return 0;
452
453 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) 447 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
454 mvm->ps_disabled = true; 448 mvm->ps_disabled = true;
455 449
@@ -466,7 +460,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
466 "Sending device power command with flags = 0x%X\n", 460 "Sending device power command with flags = 0x%X\n",
467 cmd.flags); 461 cmd.flags);
468 462
469 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd), 463 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
470 &cmd); 464 &cmd);
471} 465}
472 466
@@ -508,86 +502,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
508 return 0; 502 return 0;
509} 503}
510 504
511struct iwl_power_constraint { 505struct iwl_power_vifs {
512 struct ieee80211_vif *bf_vif; 506 struct ieee80211_vif *bf_vif;
513 struct ieee80211_vif *bss_vif; 507 struct ieee80211_vif *bss_vif;
514 struct ieee80211_vif *p2p_vif; 508 struct ieee80211_vif *p2p_vif;
515 u16 bss_phyctx_id; 509 struct ieee80211_vif *ap_vif;
516 u16 p2p_phyctx_id; 510 struct ieee80211_vif *monitor_vif;
517 bool pm_disabled; 511 bool p2p_active;
518 bool ps_disabled; 512 bool bss_active;
519 struct iwl_mvm *mvm; 513 bool ap_active;
514 bool monitor_active;
520}; 515};
521 516
522static void iwl_mvm_power_iterator(void *_data, u8 *mac, 517static void iwl_mvm_power_iterator(void *_data, u8 *mac,
523 struct ieee80211_vif *vif) 518 struct ieee80211_vif *vif)
524{ 519{
525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 520 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
526 struct iwl_power_constraint *power_iterator = _data; 521 struct iwl_power_vifs *power_iterator = _data;
527 struct iwl_mvm *mvm = power_iterator->mvm;
528 522
523 mvmvif->pm_enabled = false;
529 switch (ieee80211_vif_type_p2p(vif)) { 524 switch (ieee80211_vif_type_p2p(vif)) {
530 case NL80211_IFTYPE_P2P_DEVICE: 525 case NL80211_IFTYPE_P2P_DEVICE:
531 break; 526 break;
532 527
533 case NL80211_IFTYPE_P2P_GO: 528 case NL80211_IFTYPE_P2P_GO:
534 case NL80211_IFTYPE_AP: 529 case NL80211_IFTYPE_AP:
535 /* no BSS power mgmt if we have an active AP */ 530 /* only a single MAC of the same type */
536 if (mvmvif->ap_ibss_active) 531 WARN_ON(power_iterator->ap_vif);
537 power_iterator->pm_disabled = true; 532 power_iterator->ap_vif = vif;
533 if (mvmvif->phy_ctxt)
534 if (mvmvif->phy_ctxt->id < MAX_PHYS)
535 power_iterator->ap_active = true;
538 break; 536 break;
539 537
540 case NL80211_IFTYPE_MONITOR: 538 case NL80211_IFTYPE_MONITOR:
541 /* no BSS power mgmt and no device power save */ 539 /* only a single MAC of the same type */
542 power_iterator->pm_disabled = true; 540 WARN_ON(power_iterator->monitor_vif);
543 power_iterator->ps_disabled = true; 541 power_iterator->monitor_vif = vif;
542 if (mvmvif->phy_ctxt)
543 if (mvmvif->phy_ctxt->id < MAX_PHYS)
544 power_iterator->monitor_active = true;
544 break; 545 break;
545 546
546 case NL80211_IFTYPE_P2P_CLIENT: 547 case NL80211_IFTYPE_P2P_CLIENT:
547 if (mvmvif->phy_ctxt) 548 /* only a single MAC of the same type */
548 power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
549
550 /* we should have only one P2P vif */
551 WARN_ON(power_iterator->p2p_vif); 549 WARN_ON(power_iterator->p2p_vif);
552 power_iterator->p2p_vif = vif; 550 power_iterator->p2p_vif = vif;
553 551 if (mvmvif->phy_ctxt)
554 IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n", 552 if (mvmvif->phy_ctxt->id < MAX_PHYS)
555 power_iterator->p2p_phyctx_id, 553 power_iterator->p2p_active = true;
556 power_iterator->bss_phyctx_id);
557 if (!(mvm->fw->ucode_capa.flags &
558 IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
559 /* no BSS power mgmt if we have a P2P client*/
560 power_iterator->pm_disabled = true;
561 } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
562 power_iterator->bss_phyctx_id < MAX_PHYS &&
563 power_iterator->p2p_phyctx_id ==
564 power_iterator->bss_phyctx_id) {
565 power_iterator->pm_disabled = true;
566 }
567 break; 554 break;
568 555
569 case NL80211_IFTYPE_STATION: 556 case NL80211_IFTYPE_STATION:
570 if (mvmvif->phy_ctxt) 557 /* only a single MAC of the same type */
571 power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
572
573 /* we should have only one BSS vif */
574 WARN_ON(power_iterator->bss_vif); 558 WARN_ON(power_iterator->bss_vif);
575 power_iterator->bss_vif = vif; 559 power_iterator->bss_vif = vif;
560 if (mvmvif->phy_ctxt)
561 if (mvmvif->phy_ctxt->id < MAX_PHYS)
562 power_iterator->bss_active = true;
576 563
577 if (mvmvif->bf_data.bf_enabled && 564 if (mvmvif->bf_data.bf_enabled &&
578 !WARN_ON(power_iterator->bf_vif)) 565 !WARN_ON(power_iterator->bf_vif))
579 power_iterator->bf_vif = vif; 566 power_iterator->bf_vif = vif;
580 567
581 IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
582 power_iterator->p2p_phyctx_id,
583 power_iterator->bss_phyctx_id);
584 if (mvm->fw->ucode_capa.flags &
585 IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
586 (power_iterator->p2p_phyctx_id < MAX_PHYS &&
587 power_iterator->bss_phyctx_id < MAX_PHYS &&
588 power_iterator->p2p_phyctx_id ==
589 power_iterator->bss_phyctx_id))
590 power_iterator->pm_disabled = true;
591 break; 568 break;
592 569
593 default: 570 default:
@@ -596,70 +573,73 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
596} 573}
597 574
598static void 575static void
599iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm, 576iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
600 struct iwl_power_constraint *constraint) 577 struct iwl_power_vifs *vifs)
601{ 578{
602 lockdep_assert_held(&mvm->mutex); 579 struct iwl_mvm_vif *bss_mvmvif = NULL;
580 struct iwl_mvm_vif *p2p_mvmvif = NULL;
581 struct iwl_mvm_vif *ap_mvmvif = NULL;
582 bool client_same_channel = false;
583 bool ap_same_channel = false;
603 584
604 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) { 585 lockdep_assert_held(&mvm->mutex);
605 constraint->pm_disabled = true;
606 constraint->ps_disabled = true;
607 }
608 586
587 /* get vifs info + set pm_enabled to false */
609 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 588 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
610 IEEE80211_IFACE_ITER_NORMAL, 589 IEEE80211_IFACE_ITER_NORMAL,
611 iwl_mvm_power_iterator, constraint); 590 iwl_mvm_power_iterator, vifs);
612}
613
614int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
615{
616 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
617 struct iwl_power_constraint constraint = {
618 .p2p_phyctx_id = MAX_PHYS,
619 .bss_phyctx_id = MAX_PHYS,
620 .mvm = mvm,
621 };
622 bool ba_enable;
623 int ret;
624 591
625 lockdep_assert_held(&mvm->mutex); 592 if (vifs->bss_vif)
593 bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
626 594
627 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) 595 if (vifs->p2p_vif)
628 return 0; 596 p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
629 597
630 iwl_mvm_power_get_global_constraint(mvm, &constraint); 598 if (vifs->ap_vif)
631 mvm->ps_disabled = constraint.ps_disabled; 599 ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
632 mvm->pm_disabled = constraint.pm_disabled;
633 600
634 /* don't update device power state unless we add / remove monitor */ 601 /* enable PM on the bss if the bss stands alone */
635 if (vif->type == NL80211_IFTYPE_MONITOR) { 602 if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
636 ret = iwl_mvm_power_update_device(mvm); 603 bss_mvmvif->pm_enabled = true;
637 if (ret) 604 return;
638 return ret;
639 } 605 }
640 606
641 if (constraint.bss_vif) { 607 /* enable PM on p2p if the p2p client stands alone */
642 ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif); 608 if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
643 if (ret) 609 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
644 return ret; 610 p2p_mvmvif->pm_enabled = true;
611 return;
645 } 612 }
646 613
647 if (constraint.p2p_vif) { 614 if (vifs->bss_active && vifs->p2p_active)
648 ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif); 615 client_same_channel = (bss_mvmvif->phy_ctxt->id ==
649 if (ret) 616 p2p_mvmvif->phy_ctxt->id);
650 return ret; 617 if (vifs->bss_active && vifs->ap_active)
618 ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
619 ap_mvmvif->phy_ctxt->id);
620
621 /* clients are not standalone: enable PM if in DCM */
622 if (!(client_same_channel || ap_same_channel) &&
623 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
624 if (vifs->bss_active)
625 bss_mvmvif->pm_enabled = true;
626 if (vifs->p2p_active &&
627 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
628 p2p_mvmvif->pm_enabled = true;
629 return;
651 } 630 }
652 631
653 if (!constraint.bf_vif) 632 /*
654 return 0; 633 * There is only one channel in the system and there are only
655 634 * bss and p2p clients that share it
656 vif = constraint.bf_vif; 635 */
657 mvmvif = iwl_mvm_vif_from_mac80211(vif); 636 if (client_same_channel && !vifs->ap_active &&
658 637 (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
659 ba_enable = !(constraint.pm_disabled || constraint.ps_disabled || 638 /* share same channel*/
660 !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif)); 638 /* they share the same channel */
661 640 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
662 return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable); 641 p2p_mvmvif->pm_enabled = true;
642 }
663} 643}
664 644
665#ifdef CONFIG_IWLWIFI_DEBUGFS 645#ifdef CONFIG_IWLWIFI_DEBUGFS
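Stripped of vif bookkeeping, the new iwl_mvm_power_set_pm() encodes a small decision table over which interfaces are active and whether they share a PHY context. A condensed sketch (has_cap() abbreviates the ucode_capa flag checks; bss/p2p are the matching mvmvifs):

	if (bss_active && !p2p_active && !ap_active)
		bss->pm_enabled = true;			/* BSS standalone */
	else if (p2p_active && !bss_active && !ap_active)
		p2p->pm_enabled = has_cap(P2P_PM);	/* P2P standalone */
	else if (!client_same_channel && !ap_same_channel &&
		 has_cap(BSS_P2P_PS_DCM)) {		/* all on own channels */
		bss->pm_enabled = bss_active;
		p2p->pm_enabled = p2p_active && has_cap(P2P_PM);
	} else if (client_same_channel && !ap_active &&
		   has_cap(BSS_P2P_PS_SCM)) {		/* one shared channel */
		bss->pm_enabled = true;
		p2p->pm_enabled = has_cap(P2P_PM);
	}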
@@ -671,19 +651,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
671 struct iwl_mac_power_cmd cmd = {}; 651 struct iwl_mac_power_cmd cmd = {};
672 int pos = 0; 652 int pos = 0;
673 653
674 if (WARN_ON(!(mvm->fw->ucode_capa.flags &
675 IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
676 return 0;
677
678 mutex_lock(&mvm->mutex); 654 mutex_lock(&mvm->mutex);
679 memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd)); 655 memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
680 mutex_unlock(&mvm->mutex); 656 mutex_unlock(&mvm->mutex);
681 657
682 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
683 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
684 (cmd.flags &
685 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
686 0 : 1);
687 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", 658 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
688 iwlmvm_mod_params.power_scheme); 659 iwlmvm_mod_params.power_scheme);
689 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", 660 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -790,7 +761,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
790 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 761 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
791 int ret; 762 int ret;
792 763
793 if (mvmvif != mvm->bf_allowed_vif || 764 if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
794 vif->type != NL80211_IFTYPE_STATION || vif->p2p) 765 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
795 return 0; 766 return 0;
796 767
@@ -818,6 +789,26 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
818 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); 789 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
819} 790}
820 791
792static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
793 struct ieee80211_vif *vif,
794 bool enable)
795{
796 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
797 struct iwl_beacon_filter_cmd cmd = {
798 IWL_BF_CMD_CONFIG_DEFAULTS,
799 .bf_enable_beacon_filter = cpu_to_le32(1),
800 };
801
802 if (!mvmvif->bf_data.bf_enabled)
803 return 0;
804
805 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
806 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
807
808 mvmvif->bf_data.ba_enabled = enable;
809 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
810}
811
821int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 812int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
822 struct ieee80211_vif *vif, 813 struct ieee80211_vif *vif,
823 u32 flags) 814 u32 flags)
@@ -826,8 +817,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
826 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 817 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
827 int ret; 818 int ret;
828 819
829 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) || 820 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
830 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
831 return 0; 821 return 0;
832 822
833 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); 823 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
@@ -838,6 +828,55 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
838 return ret; 828 return ret;
839} 829}
840 830
831int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
832{
833 struct iwl_mvm_vif *mvmvif;
834 struct iwl_power_vifs vifs = {};
835 bool ba_enable;
836 int ret;
837
838 lockdep_assert_held(&mvm->mutex);
839
840 iwl_mvm_power_set_pm(mvm, &vifs);
841
842 /* disable PS if CAM */
843 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
844 mvm->ps_disabled = true;
845 } else {
846 /* don't update device power state unless we add / remove monitor */
847 if (vifs.monitor_vif) {
848 if (vifs.monitor_active)
849 mvm->ps_disabled = true;
850 ret = iwl_mvm_power_update_device(mvm);
851 if (ret)
852 return ret;
853 }
854 }
855
856 if (vifs.bss_vif) {
857 ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
858 if (ret)
859 return ret;
860 }
861
862 if (vifs.p2p_vif) {
863 ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
864 if (ret)
865 return ret;
866 }
867
868 if (!vifs.bf_vif)
869 return 0;
870
871 mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif);
872
873 ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
874 !vifs.bf_vif->bss_conf.ps ||
875 iwl_mvm_vif_low_latency(mvmvif));
876
877 return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
878}
879
841int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, 880int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
842 struct ieee80211_vif *vif, 881 struct ieee80211_vif *vif,
843 bool enable, u32 flags) 882 bool enable, u32 flags)
@@ -861,9 +900,10 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
861 if (WARN_ON(!dtimper_msec)) 900 if (WARN_ON(!dtimper_msec))
862 return 0; 901 return 0;
863 902
864 cmd.flags |=
865 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
866 cmd.skip_dtim_periods = 300 / dtimper_msec; 903 cmd.skip_dtim_periods = 300 / dtimper_msec;
904 if (cmd.skip_dtim_periods)
905 cmd.flags |=
906 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
867 } 907 }
868 iwl_mvm_power_log(mvm, &cmd); 908 iwl_mvm_power_log(mvm, &cmd);
869#ifdef CONFIG_IWLWIFI_DEBUGFS 909#ifdef CONFIG_IWLWIFI_DEBUGFS
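The reorder above matters for long DTIM intervals: skip_dtim_periods is an integer division of a 300 ms budget by the DTIM period in milliseconds, so it can be zero, and POWER_FLAGS_SKIP_OVER_DTIM_MSK must only be advertised when there is actually a period to skip:

	/* e.g. DTIM = 102 ms -> 300/102 = 2 periods, flag set;
	 *      DTIM = 409 ms -> 300/409 = 0 periods, flag stays clear */
	cmd.skip_dtim_periods = 300 / dtimper_msec;
	if (cmd.skip_dtim_periods)
		cmd.flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);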
@@ -894,33 +934,3 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
894 934
895 return ret; 935 return ret;
896} 936}
897
898int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
899 struct ieee80211_vif *vif,
900 bool force,
901 u32 flags)
902{
903 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
904
905 if (mvmvif != mvm->bf_allowed_vif)
906 return 0;
907
908 if (!mvmvif->bf_data.bf_enabled) {
909 /* disable beacon filtering explicitly if force is true */
910 if (force)
911 return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
912 return 0;
913 }
914
915 return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
916}
917
918int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
919{
920 struct iwl_powertable_cmd cmd = {
921 .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
922 };
923
924 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
925 sizeof(cmd), &cmd);
926}
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 35e86e06dffd..ba68d7b84505 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -285,7 +285,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
285 285
286 iwl_mvm_adjust_quota_for_noa(mvm, &cmd); 286 iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
287 287
288 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, 288 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
289 sizeof(cmd), &cmd); 289 sizeof(cmd), &cmd);
290 if (ret) 290 if (ret)
291 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 291 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index e1c838899363..306a6caa4868 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -211,7 +211,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
211 .next_columns = { 211 .next_columns = {
212 RS_COLUMN_LEGACY_ANT_B, 212 RS_COLUMN_LEGACY_ANT_B,
213 RS_COLUMN_SISO_ANT_A, 213 RS_COLUMN_SISO_ANT_A,
214 RS_COLUMN_SISO_ANT_B, 214 RS_COLUMN_MIMO2,
215 RS_COLUMN_INVALID, 215 RS_COLUMN_INVALID,
216 RS_COLUMN_INVALID, 216 RS_COLUMN_INVALID,
217 RS_COLUMN_INVALID, 217 RS_COLUMN_INVALID,
@@ -223,8 +223,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
223 .ant = ANT_B, 223 .ant = ANT_B,
224 .next_columns = { 224 .next_columns = {
225 RS_COLUMN_LEGACY_ANT_A, 225 RS_COLUMN_LEGACY_ANT_A,
226 RS_COLUMN_SISO_ANT_A,
227 RS_COLUMN_SISO_ANT_B, 226 RS_COLUMN_SISO_ANT_B,
227 RS_COLUMN_MIMO2,
228 RS_COLUMN_INVALID, 228 RS_COLUMN_INVALID,
229 RS_COLUMN_INVALID, 229 RS_COLUMN_INVALID,
230 RS_COLUMN_INVALID, 230 RS_COLUMN_INVALID,
@@ -238,10 +238,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
238 RS_COLUMN_SISO_ANT_B, 238 RS_COLUMN_SISO_ANT_B,
239 RS_COLUMN_MIMO2, 239 RS_COLUMN_MIMO2,
240 RS_COLUMN_SISO_ANT_A_SGI, 240 RS_COLUMN_SISO_ANT_A_SGI,
241 RS_COLUMN_SISO_ANT_B_SGI,
242 RS_COLUMN_LEGACY_ANT_A, 241 RS_COLUMN_LEGACY_ANT_A,
243 RS_COLUMN_LEGACY_ANT_B, 242 RS_COLUMN_LEGACY_ANT_B,
244 RS_COLUMN_INVALID, 243 RS_COLUMN_INVALID,
244 RS_COLUMN_INVALID,
245 }, 245 },
246 .checks = { 246 .checks = {
247 rs_siso_allow, 247 rs_siso_allow,
@@ -254,10 +254,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
254 RS_COLUMN_SISO_ANT_A, 254 RS_COLUMN_SISO_ANT_A,
255 RS_COLUMN_MIMO2, 255 RS_COLUMN_MIMO2,
256 RS_COLUMN_SISO_ANT_B_SGI, 256 RS_COLUMN_SISO_ANT_B_SGI,
257 RS_COLUMN_SISO_ANT_A_SGI,
258 RS_COLUMN_LEGACY_ANT_A, 257 RS_COLUMN_LEGACY_ANT_A,
259 RS_COLUMN_LEGACY_ANT_B, 258 RS_COLUMN_LEGACY_ANT_B,
260 RS_COLUMN_INVALID, 259 RS_COLUMN_INVALID,
260 RS_COLUMN_INVALID,
261 }, 261 },
262 .checks = { 262 .checks = {
263 rs_siso_allow, 263 rs_siso_allow,
@@ -271,10 +271,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
271 RS_COLUMN_SISO_ANT_B_SGI, 271 RS_COLUMN_SISO_ANT_B_SGI,
272 RS_COLUMN_MIMO2_SGI, 272 RS_COLUMN_MIMO2_SGI,
273 RS_COLUMN_SISO_ANT_A, 273 RS_COLUMN_SISO_ANT_A,
274 RS_COLUMN_SISO_ANT_B,
275 RS_COLUMN_MIMO2,
276 RS_COLUMN_LEGACY_ANT_A, 274 RS_COLUMN_LEGACY_ANT_A,
277 RS_COLUMN_LEGACY_ANT_B, 275 RS_COLUMN_LEGACY_ANT_B,
276 RS_COLUMN_INVALID,
277 RS_COLUMN_INVALID,
278 }, 278 },
279 .checks = { 279 .checks = {
280 rs_siso_allow, 280 rs_siso_allow,
@@ -289,10 +289,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
289 RS_COLUMN_SISO_ANT_A_SGI, 289 RS_COLUMN_SISO_ANT_A_SGI,
290 RS_COLUMN_MIMO2_SGI, 290 RS_COLUMN_MIMO2_SGI,
291 RS_COLUMN_SISO_ANT_B, 291 RS_COLUMN_SISO_ANT_B,
292 RS_COLUMN_SISO_ANT_A,
293 RS_COLUMN_MIMO2,
294 RS_COLUMN_LEGACY_ANT_A, 292 RS_COLUMN_LEGACY_ANT_A,
295 RS_COLUMN_LEGACY_ANT_B, 293 RS_COLUMN_LEGACY_ANT_B,
294 RS_COLUMN_INVALID,
295 RS_COLUMN_INVALID,
296 }, 296 },
297 .checks = { 297 .checks = {
298 rs_siso_allow, 298 rs_siso_allow,
@@ -304,12 +304,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
304 .ant = ANT_AB, 304 .ant = ANT_AB,
305 .next_columns = { 305 .next_columns = {
306 RS_COLUMN_SISO_ANT_A, 306 RS_COLUMN_SISO_ANT_A,
307 RS_COLUMN_SISO_ANT_B,
308 RS_COLUMN_SISO_ANT_A_SGI,
309 RS_COLUMN_SISO_ANT_B_SGI,
310 RS_COLUMN_MIMO2_SGI, 307 RS_COLUMN_MIMO2_SGI,
311 RS_COLUMN_LEGACY_ANT_A, 308 RS_COLUMN_LEGACY_ANT_A,
312 RS_COLUMN_LEGACY_ANT_B, 309 RS_COLUMN_LEGACY_ANT_B,
310 RS_COLUMN_INVALID,
311 RS_COLUMN_INVALID,
312 RS_COLUMN_INVALID,
313 }, 313 },
314 .checks = { 314 .checks = {
315 rs_mimo_allow, 315 rs_mimo_allow,
@@ -321,12 +321,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
321 .sgi = true, 321 .sgi = true,
322 .next_columns = { 322 .next_columns = {
323 RS_COLUMN_SISO_ANT_A_SGI, 323 RS_COLUMN_SISO_ANT_A_SGI,
324 RS_COLUMN_SISO_ANT_B_SGI,
325 RS_COLUMN_SISO_ANT_A,
326 RS_COLUMN_SISO_ANT_B,
327 RS_COLUMN_MIMO2, 324 RS_COLUMN_MIMO2,
328 RS_COLUMN_LEGACY_ANT_A, 325 RS_COLUMN_LEGACY_ANT_A,
329 RS_COLUMN_LEGACY_ANT_B, 326 RS_COLUMN_LEGACY_ANT_B,
327 RS_COLUMN_INVALID,
328 RS_COLUMN_INVALID,
329 RS_COLUMN_INVALID,
330 }, 330 },
331 .checks = { 331 .checks = {
332 rs_mimo_allow, 332 rs_mimo_allow,
@@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
527 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); 527 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
528 for (i = 0; i < IWL_RATE_COUNT; i++) 528 for (i = 0; i < IWL_RATE_COUNT; i++)
529 rs_rate_scale_clear_window(&tbl->win[i]); 529 rs_rate_scale_clear_window(&tbl->win[i]);
530
531 for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
532 rs_rate_scale_clear_window(&tbl->tpc_win[i]);
530} 533}
531 534
532static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) 535static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
656 return 0; 659 return 0;
657} 660}
658 661
659static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, 662static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
660 int scale_index, int attempts, int successes) 663 struct iwl_scale_tbl_info *tbl,
664 int scale_index, int attempts, int successes,
665 u8 reduced_txp)
661{ 666{
662 struct iwl_rate_scale_data *window = NULL; 667 struct iwl_rate_scale_data *window = NULL;
668 int ret;
663 669
664 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 670 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
665 return -EINVAL; 671 return -EINVAL;
666 672
673 if (tbl->column != RS_COLUMN_INVALID) {
674 lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
675 lq_sta->tx_stats[tbl->column][scale_index].success += successes;
676 }
677
667 /* Select window for current tx bit rate */ 678 /* Select window for current tx bit rate */
668 window = &(tbl->win[scale_index]); 679 window = &(tbl->win[scale_index]);
669 680
681 ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
682 window);
683 if (ret)
684 return ret;
685
686 if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
687 return -EINVAL;
688
689 window = &tbl->tpc_win[reduced_txp];
670 return _rs_collect_tx_data(tbl, scale_index, attempts, successes, 690 return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
671 window); 691 window);
672} 692}
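rs_collect_tx_data() now feeds each TX outcome into two histograms: the per-rate success window it always kept, plus a window indexed by the TX-power reduction in effect, so the TPC logic can later compare success ratios across power levels. The added bookkeeping amounts to:

	/* per-column / per-rate totals, kept for statistics */
	if (tbl->column != RS_COLUMN_INVALID) {
		lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
		lq_sta->tx_stats[tbl->column][scale_index].success += successes;
	}

	/* the same sample also lands in the window for the txp reduction used */
	window = &tbl->tpc_win[reduced_txp];
	return _rs_collect_tx_data(tbl, scale_index, attempts, successes, window);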
@@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1000 u32 ucode_rate; 1020 u32 ucode_rate;
1001 struct rs_rate rate; 1021 struct rs_rate rate;
1002 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 1022 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
1023 u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
1003 1024
1004 /* Treat uninitialized rate scaling data same as non-existing. */ 1025 /* Treat uninitialized rate scaling data same as non-existing. */
1005 if (!lq_sta) { 1026 if (!lq_sta) {
@@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1141 if (info->flags & IEEE80211_TX_STAT_AMPDU) { 1162 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1142 ucode_rate = le32_to_cpu(table->rs_table[0]); 1163 ucode_rate = le32_to_cpu(table->rs_table[0]);
1143 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); 1164 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
1144 rs_collect_tx_data(curr_tbl, rate.index, 1165 rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
1145 info->status.ampdu_len, 1166 info->status.ampdu_len,
1146 info->status.ampdu_ack_len); 1167 info->status.ampdu_ack_len,
1168 reduced_txp);
1147 1169
1148 /* Update success/fail counts if not searching for new mode */ 1170 /* Update success/fail counts if not searching for new mode */
1149 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { 1171 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1176 else 1198 else
1177 continue; 1199 continue;
1178 1200
1179 rs_collect_tx_data(tmp_tbl, rate.index, 1, 1201 rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
1180 i < retries ? 0 : legacy_success); 1202 i < retries ? 0 : legacy_success,
1203 reduced_txp);
1181 } 1204 }
1182 1205
1183 /* Update success/fail counts if not searching for new mode */ 1206 /* Update success/fail counts if not searching for new mode */
@@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1188 } 1211 }
1189 /* The last TX rate is cached in lq_sta; it's set in if/else above */ 1212 /* The last TX rate is cached in lq_sta; it's set in if/else above */
1190 lq_sta->last_rate_n_flags = ucode_rate; 1213 lq_sta->last_rate_n_flags = ucode_rate;
1214 IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
1191done: 1215done:
1192 /* See if there's a better rate or modulation mode to try. */ 1216 /* See if there's a better rate or modulation mode to try. */
1193 if (sta && sta->supp_rates[sband->band]) 1217 if (sta && sta->supp_rates[sband->band])
@@ -1311,105 +1335,50 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1311 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); 1335 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
1312} 1336}
1313 1337
1314/*
1315 * Find starting rate for new "search" high-throughput mode of modulation.
1316 * Goal is to find lowest expected rate (under perfect conditions) that is
1317 * above the current measured throughput of "active" mode, to give new mode
1318 * a fair chance to prove itself without too many challenges.
1319 *
1320 * This gets called when transitioning to more aggressive modulation
1321 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1322 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1323 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1324 * bit rate will typically need to increase, but not if performance was bad.
1325 */
1326static s32 rs_get_best_rate(struct iwl_mvm *mvm, 1338static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1327 struct iwl_lq_sta *lq_sta, 1339 struct iwl_lq_sta *lq_sta,
1328 struct iwl_scale_tbl_info *tbl, /* "search" */ 1340 struct iwl_scale_tbl_info *tbl, /* "search" */
1329 u16 rate_mask, s8 index) 1341 unsigned long rate_mask, s8 index)
1330{ 1342{
1331 /* "active" values */
1332 struct iwl_scale_tbl_info *active_tbl = 1343 struct iwl_scale_tbl_info *active_tbl =
1333 &(lq_sta->lq_info[lq_sta->active_tbl]); 1344 &(lq_sta->lq_info[lq_sta->active_tbl]);
1334 s32 active_sr = active_tbl->win[index].success_ratio; 1345 s32 success_ratio = active_tbl->win[index].success_ratio;
1335 s32 active_tpt = active_tbl->expected_tpt[index]; 1346 u16 expected_current_tpt = active_tbl->expected_tpt[index];
1336 /* expected "search" throughput */
1337 const u16 *tpt_tbl = tbl->expected_tpt; 1347 const u16 *tpt_tbl = tbl->expected_tpt;
1338
1339 s32 new_rate, high, low, start_hi;
1340 u16 high_low; 1348 u16 high_low;
1341 s8 rate = index; 1349 u32 target_tpt;
1342 1350 int rate_idx;
1343 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1344
1345 while (1) {
1346 high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
1347 tbl->rate.type);
1348
1349 low = high_low & 0xff;
1350 high = (high_low >> 8) & 0xff;
1351 1351
1352 /* 1352 if (success_ratio > RS_SR_NO_DECREASE) {
1353 * Lower the "search" bit rate, to give new "search" mode 1353 target_tpt = 100 * expected_current_tpt;
1354 * approximately the same throughput as "active" if: 1354 IWL_DEBUG_RATE(mvm,
1355 * 1355 "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
1356 * 1) "Active" mode has been working modestly well (but not 1356 success_ratio, target_tpt);
1357 * great), and expected "search" throughput (under perfect 1357 } else {
1358 * conditions) at candidate rate is above the actual 1358 target_tpt = lq_sta->last_tpt;
1359 * measured "active" throughput (but less than expected 1359 IWL_DEBUG_RATE(mvm,
1360 * "active" throughput under perfect conditions). 1360 "SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n",
1361 * OR 1361 success_ratio, target_tpt);
1362 * 2) "Active" mode has been working perfectly or very well 1362 }
1363 * and expected "search" throughput (under perfect
1364 * conditions) at candidate rate is above expected
1365 * "active" throughput (under perfect conditions).
1366 */
1367 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1368 ((active_sr > RS_SR_FORCE_DECREASE) &&
1369 (active_sr <= IWL_RATE_HIGH_TH) &&
1370 (tpt_tbl[rate] <= active_tpt))) ||
1371 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1372 (tpt_tbl[rate] > active_tpt))) {
1373 /* (2nd or later pass)
1374 * If we've already tried to raise the rate, and are
1375 * now trying to lower it, use the higher rate. */
1376 if (start_hi != IWL_RATE_INVALID) {
1377 new_rate = start_hi;
1378 break;
1379 }
1380
1381 new_rate = rate;
1382 1363
1383 /* Loop again with lower rate */ 1364 rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
1384 if (low != IWL_RATE_INVALID)
1385 rate = low;
1386 1365
1387 /* Lower rate not available, use the original */ 1366 while (rate_idx != IWL_RATE_INVALID) {
1388 else 1367 if (target_tpt < (100 * tpt_tbl[rate_idx]))
1389 break; 1368 break;
1390
1391 /* Else try to raise the "search" rate to match "active" */
1392 } else {
1393 /* (2nd or later pass)
1394 * If we've already tried to lower the rate, and are
1395 * now trying to raise it, use the lower rate. */
1396 if (new_rate != IWL_RATE_INVALID)
1397 break;
1398 1369
1399 /* Loop again with higher rate */ 1370 high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
1400 else if (high != IWL_RATE_INVALID) { 1371 tbl->rate.type);
1401 start_hi = high;
1402 rate = high;
1403 1372
1404 /* Higher rate not available, use the original */ 1373 rate_idx = (high_low >> 8) & 0xff;
1405 } else {
1406 new_rate = rate;
1407 break;
1408 }
1409 }
1410 } 1374 }
1411 1375
1412 return new_rate; 1376 IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
1377 rate_idx, target_tpt,
1378 rate_idx != IWL_RATE_INVALID ?
1379 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
1380
1381 return rate_idx;
1413} 1382}
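The rewrite above replaces the old two-direction walk with a single upward scan: choose a target throughput - 100x the expected current throughput when the success ratio clears RS_SR_NO_DECREASE, otherwise the last measured throughput - then start from the lowest allowed rate and climb until the candidate's expected throughput beats the target. A rough standalone model of that scan, assuming a plain rate bitmask and a flat expected-throughput table (the real rs_get_adjacent_rate() also accounts for the rate-table type):

/* walk the allowed-rate mask from the lowest rate upward and stop at
 * the first rate whose expected throughput beats target_tpt; table
 * contents and the mask are made-up values */
#include <stdio.h>

#define RATE_COUNT	15
#define RATE_INVALID	RATE_COUNT

static int next_set_bit(unsigned long mask, int from)
{
	int i;

	for (i = from; i < RATE_COUNT; i++)
		if (mask & (1UL << i))
			return i;
	return RATE_INVALID;
}

static int best_start_rate(unsigned long rate_mask,
			   const unsigned short *tpt_tbl,
			   unsigned int target_tpt)
{
	int idx = next_set_bit(rate_mask, 0);	/* find_first_bit() */

	while (idx != RATE_INVALID) {
		if (target_tpt < 100u * tpt_tbl[idx])
			break;
		idx = next_set_bit(rate_mask, idx + 1);
	}
	return idx;
}

int main(void)
{
	/* hypothetical expected-throughput table (same units the driver
	 * compares against after the * 100 scaling) */
	static const unsigned short tpt[RATE_COUNT] = {
		7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
	};
	unsigned long mask = 0x0FF0;	/* rates 4..11 allowed */

	printf("start rate = %d\n", best_start_rate(mask, tpt, 9800));
	return 0;
}

With these numbers the scan settles on rate 8, the first candidate whose expected throughput (12100) exceeds the 9800 target.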
1414 1383
1415static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) 1384static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
@@ -1584,7 +1553,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1584 1553
1585 tpt = lq_sta->last_tpt / 100; 1554 tpt = lq_sta->last_tpt / 100;
1586 expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col, 1555 expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
1587 tbl->rate.bw); 1556 rs_bw_from_sta_bw(sta));
1588 if (WARN_ON_ONCE(!expected_tpt_tbl)) 1557 if (WARN_ON_ONCE(!expected_tpt_tbl))
1589 continue; 1558 continue;
1590 1559
@@ -1625,7 +1594,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
1625 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; 1594 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
1626 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1595 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1627 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1596 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1628 u16 rate_mask = 0; 1597 unsigned long rate_mask = 0;
1629 u32 rate_idx = 0; 1598 u32 rate_idx = 0;
1630 1599
1631 memcpy(search_tbl, tbl, sz); 1600 memcpy(search_tbl, tbl, sz);
@@ -1667,7 +1636,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
1667 !(BIT(rate_idx) & rate_mask)) { 1636 !(BIT(rate_idx) & rate_mask)) {
1668 IWL_DEBUG_RATE(mvm, 1637 IWL_DEBUG_RATE(mvm,
1669 "can not switch with index %d" 1638 "can not switch with index %d"
1670 " rate mask %x\n", 1639 " rate mask %lx\n",
1671 rate_idx, rate_mask); 1640 rate_idx, rate_mask);
1672 1641
1673 goto err; 1642 goto err;
@@ -1769,6 +1738,203 @@ out:
1769 return action; 1738 return action;
1770} 1739}
1771 1740
1741static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
1742 int *weaker, int *stronger)
1743{
1744 *weaker = index + TPC_TX_POWER_STEP;
1745 if (*weaker > TPC_MAX_REDUCTION)
1746 *weaker = TPC_INVALID;
1747
1748 *stronger = index - TPC_TX_POWER_STEP;
1749 if (*stronger < 0)
1750 *stronger = TPC_INVALID;
1751}
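rs_get_adjacent_txp() above steps the reduction index in TPC_TX_POWER_STEP increments and clamps at both ends with TPC_INVALID, so with the rs.h constants the ladder actually visited is 0, 3, 6, 9, 12, 15. A quick standalone check (constants copied from rs.h; TPC_INVALID prints as 255):

#include <stdio.h>

#define TPC_TX_POWER_STEP	3
#define TPC_MAX_REDUCTION	15
#define TPC_INVALID		0xff

static void adjacent_txp(int idx, int *weaker, int *stronger)
{
	*weaker = idx + TPC_TX_POWER_STEP;
	if (*weaker > TPC_MAX_REDUCTION)
		*weaker = TPC_INVALID;
	*stronger = idx - TPC_TX_POWER_STEP;
	if (*stronger < 0)
		*stronger = TPC_INVALID;
}

int main(void)
{
	int i, w, s;

	for (i = 0; i <= TPC_MAX_REDUCTION; i += TPC_TX_POWER_STEP) {
		adjacent_txp(i, &w, &s);
		printf("idx %2d: weaker %3d stronger %3d\n", i, w, s);
	}
	return 0;
}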
1752
1753static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1754 struct rs_rate *rate, enum ieee80211_band band)
1755{
1756 int index = rate->index;
1757 bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
1758 bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
1759 !vif->bss_conf.ps);
1760
1761 IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
1762 cam, sta_ps_disabled);
1763 /*
1764 * allow tpc only if power management is enabled, or if bt coex
1765 * activity grade allows it and we are on 2.4 GHz.
1766 */
1767 if ((cam || sta_ps_disabled) &&
1768 !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
1769 return false;
1770
1771 IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
1772 if (is_legacy(rate))
1773 return index == IWL_RATE_54M_INDEX;
1774 if (is_ht(rate))
1775 return index == IWL_RATE_MCS_7_INDEX;
1776 if (is_vht(rate))
1777 return index == IWL_RATE_MCS_7_INDEX ||
1778 index == IWL_RATE_MCS_8_INDEX ||
1779 index == IWL_RATE_MCS_9_INDEX;
1780
1781 WARN_ON_ONCE(1);
1782 return false;
1783}
1784
1785enum tpc_action {
1786 TPC_ACTION_STAY,
1787 TPC_ACTION_DECREASE,
1788 TPC_ACTION_INCREASE,
1789 TPC_ACTION_NO_RESTRICTION,
1790};
1791
1792static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
1793 s32 sr, int weak, int strong,
1794 int current_tpt,
1795 int weak_tpt, int strong_tpt)
1796{
1797 /* stay until we have valid tpt */
1798 if (current_tpt == IWL_INVALID_VALUE) {
1799 IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
1800 return TPC_ACTION_STAY;
1801 }
1802
1803 /* Too many failures, increase txp */
1804 if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
1805 IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
1806 return TPC_ACTION_NO_RESTRICTION;
1807 }
1808
1809 /* try decreasing first if applicable */
1810 if (weak != TPC_INVALID) {
1811 if (weak_tpt == IWL_INVALID_VALUE &&
1812 (strong_tpt == IWL_INVALID_VALUE ||
1813 current_tpt >= strong_tpt)) {
1814 IWL_DEBUG_RATE(mvm,
1815 "no weak txp measurement. decrease txp\n");
1816 return TPC_ACTION_DECREASE;
1817 }
1818
1819 if (weak_tpt > current_tpt) {
1820 IWL_DEBUG_RATE(mvm,
1821 "lower txp has better tpt. decrease txp\n");
1822 return TPC_ACTION_DECREASE;
1823 }
1824 }
1825
1826 /* next, increase if needed */
1827 if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
1828 if (weak_tpt == IWL_INVALID_VALUE &&
1829 strong_tpt != IWL_INVALID_VALUE &&
1830 current_tpt < strong_tpt) {
1831 IWL_DEBUG_RATE(mvm,
1832 "higher txp has better tpt. increase txp\n");
1833 return TPC_ACTION_INCREASE;
1834 }
1835
1836 if (weak_tpt < current_tpt &&
1837 (strong_tpt == IWL_INVALID_VALUE ||
1838 strong_tpt > current_tpt)) {
1839 IWL_DEBUG_RATE(mvm,
1840 "lower txp has worse tpt. increase txp\n");
1841 return TPC_ACTION_INCREASE;
1842 }
1843 }
1844
1845 IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
1846 return TPC_ACTION_STAY;
1847}
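The precedence encoded above is: no throughput data yet - stay; success ratio at or below TPC_SR_FORCE_INCREASE (or zero throughput) - lift all restrictions; otherwise prefer probing a weaker TX power, and only then consider stepping back up. The compressed standalone restatement below mirrors that order so individual cases can be table-tested; thresholds are copied from rs.h, and INVALID stands in for both TPC_INVALID (no such power level) and the driver's IWL_INVALID_VALUE (no measurement).

#include <stdio.h>

#define INVALID			(-1)
#define SR_FORCE_INCREASE	9600	/* 75%, percent * 128 */
#define SR_NO_INCREASE		10880	/* 85% */

enum action { STAY, DECREASE, INCREASE, NO_RESTRICTION };

static enum action tpc_action(int sr, int weak, int strong,
			      int cur_tpt, int weak_tpt, int strong_tpt)
{
	if (cur_tpt == INVALID)
		return STAY;
	if (sr <= SR_FORCE_INCREASE || cur_tpt == 0)
		return NO_RESTRICTION;
	if (weak != INVALID) {
		if (weak_tpt == INVALID &&
		    (strong_tpt == INVALID || cur_tpt >= strong_tpt))
			return DECREASE;	/* probe untested weaker txp */
		if (weak_tpt > cur_tpt)
			return DECREASE;	/* weaker txp measured better */
	}
	if (sr < SR_NO_INCREASE && strong != INVALID) {
		if (weak_tpt == INVALID && strong_tpt != INVALID &&
		    cur_tpt < strong_tpt)
			return INCREASE;
		if (weak_tpt < cur_tpt &&
		    (strong_tpt == INVALID || strong_tpt > cur_tpt))
			return INCREASE;
	}
	return STAY;
}

int main(void)
{
	/* weak SR lifts every restriction: prints 3 (NO_RESTRICTION) */
	printf("%d\n", tpc_action(8000, 3, INVALID, 50, INVALID, INVALID));
	/* untested weaker level gets probed first: prints 1 (DECREASE) */
	printf("%d\n", tpc_action(11000, 3, 0, 50, INVALID, INVALID));
	return 0;
}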
1848
1849static bool rs_tpc_perform(struct iwl_mvm *mvm,
1850 struct ieee80211_sta *sta,
1851 struct iwl_lq_sta *lq_sta,
1852 struct iwl_scale_tbl_info *tbl)
1853{
1854 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
1855 struct ieee80211_vif *vif = mvm_sta->vif;
1856 struct ieee80211_chanctx_conf *chanctx_conf;
1857 enum ieee80211_band band;
1858 struct iwl_rate_scale_data *window;
1859 struct rs_rate *rate = &tbl->rate;
1860 enum tpc_action action;
1861 s32 sr;
1862 u8 cur = lq_sta->lq.reduced_tpc;
1863 int current_tpt;
1864 int weak, strong;
1865 int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
1866
1867#ifdef CONFIG_MAC80211_DEBUGFS
1868 if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
1869 IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
1870 lq_sta->dbg_fixed_txp_reduction);
1871 lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
1872 return cur != lq_sta->dbg_fixed_txp_reduction;
1873 }
1874#endif
1875
1876 rcu_read_lock();
1877 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1878 if (WARN_ON(!chanctx_conf))
1879 band = IEEE80211_NUM_BANDS;
1880 else
1881 band = chanctx_conf->def.chan->band;
1882 rcu_read_unlock();
1883
1884 if (!rs_tpc_allowed(mvm, vif, rate, band)) {
1885 IWL_DEBUG_RATE(mvm,
1886 "tpc is not allowed. remove txp restrictions\n");
1887 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
1888 return cur != TPC_NO_REDUCTION;
1889 }
1890
1891 rs_get_adjacent_txp(mvm, cur, &weak, &strong);
1892
1893 /* Collect measured throughputs for current and adjacent rates */
1894 window = tbl->tpc_win;
1895 sr = window[cur].success_ratio;
1896 current_tpt = window[cur].average_tpt;
1897 if (weak != TPC_INVALID)
1898 weak_tpt = window[weak].average_tpt;
1899 if (strong != TPC_INVALID)
1900 strong_tpt = window[strong].average_tpt;
1901
1902 IWL_DEBUG_RATE(mvm,
1903 "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
1904 cur, current_tpt, sr, weak, strong,
1905 weak_tpt, strong_tpt);
1906
1907 action = rs_get_tpc_action(mvm, sr, weak, strong,
1908 current_tpt, weak_tpt, strong_tpt);
1909
1910 /* override actions if we are on the edge */
1911 if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
1912 IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
1913 action = TPC_ACTION_STAY;
1914 } else if (strong == TPC_INVALID &&
1915 (action == TPC_ACTION_INCREASE ||
1916 action == TPC_ACTION_NO_RESTRICTION)) {
1917 IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
1918 action = TPC_ACTION_STAY;
1919 }
1920
1921 switch (action) {
1922 case TPC_ACTION_DECREASE:
1923 lq_sta->lq.reduced_tpc = weak;
1924 return true;
1925 case TPC_ACTION_INCREASE:
1926 lq_sta->lq.reduced_tpc = strong;
1927 return true;
1928 case TPC_ACTION_NO_RESTRICTION:
1929 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
1930 return true;
1931 case TPC_ACTION_STAY:
1932 /* do nothing */
1933 break;
1934 }
1935 return false;
1936}
1937
1772/* 1938/*
1773 * Do rate scaling and search for new modulation mode. 1939 * Do rate scaling and search for new modulation mode.
1774 */ 1940 */
@@ -2019,6 +2185,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2019 break; 2185 break;
2020 case RS_ACTION_STAY: 2186 case RS_ACTION_STAY:
2021 /* No change */ 2187 /* No change */
2188 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
2189 update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
2190 break;
2022 default: 2191 default:
2023 break; 2192 break;
2024 } 2193 }
@@ -2271,10 +2440,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2271 if (i == IWL_RATE_9M_INDEX) 2440 if (i == IWL_RATE_9M_INDEX)
2272 continue; 2441 continue;
2273 2442
2274 /* Disable MCS9 as a workaround */
2275 if (i == IWL_RATE_MCS_9_INDEX)
2276 continue;
2277
2278 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ 2443 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2279 if (i == IWL_RATE_MCS_9_INDEX && 2444 if (i == IWL_RATE_MCS_9_INDEX &&
2280 sta->bandwidth == IEEE80211_STA_RX_BW_20) 2445 sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2293,10 +2458,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2293 if (i == IWL_RATE_9M_INDEX) 2458 if (i == IWL_RATE_9M_INDEX)
2294 continue; 2459 continue;
2295 2460
2296 /* Disable MCS9 as a workaround */
2297 if (i == IWL_RATE_MCS_9_INDEX)
2298 continue;
2299
2300 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ 2461 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2301 if (i == IWL_RATE_MCS_9_INDEX && 2462 if (i == IWL_RATE_MCS_9_INDEX &&
2302 sta->bandwidth == IEEE80211_STA_RX_BW_20) 2463 sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2478,6 +2639,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2478 lq_sta->is_agg = 0; 2639 lq_sta->is_agg = 0;
2479#ifdef CONFIG_MAC80211_DEBUGFS 2640#ifdef CONFIG_MAC80211_DEBUGFS
2480 lq_sta->dbg_fixed_rate = 0; 2641 lq_sta->dbg_fixed_rate = 0;
2642 lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
2481#endif 2643#endif
2482#ifdef CONFIG_IWLWIFI_DEBUGFS 2644#ifdef CONFIG_IWLWIFI_DEBUGFS
2483 iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats); 2645 iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@ -2653,6 +2815,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
2653 rs_build_rates_table_from_fixed(mvm, lq_cmd, 2815 rs_build_rates_table_from_fixed(mvm, lq_cmd,
2654 lq_sta->band, 2816 lq_sta->band,
2655 lq_sta->dbg_fixed_rate); 2817 lq_sta->dbg_fixed_rate);
2818 lq_cmd->reduced_tpc = 0;
2656 ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >> 2819 ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
2657 RATE_MCS_ANT_POS; 2820 RATE_MCS_ANT_POS;
2658 } else 2821 } else
@@ -2783,7 +2946,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2783 size_t buf_size; 2946 size_t buf_size;
2784 u32 parsed_rate; 2947 u32 parsed_rate;
2785 2948
2786
2787 mvm = lq_sta->drv; 2949 mvm = lq_sta->drv;
2788 memset(buf, 0, sizeof(buf)); 2950 memset(buf, 0, sizeof(buf));
2789 buf_size = min(count, sizeof(buf) - 1); 2951 buf_size = min(count, sizeof(buf) - 1);
@@ -2856,6 +3018,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2856 lq_sta->lq.agg_disable_start_th, 3018 lq_sta->lq.agg_disable_start_th,
2857 lq_sta->lq.agg_frame_cnt_limit); 3019 lq_sta->lq.agg_frame_cnt_limit);
2858 3020
3021 desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
2859 desc += sprintf(buff+desc, 3022 desc += sprintf(buff+desc,
2860 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", 3023 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2861 lq_sta->lq.initial_rate_index[0], 3024 lq_sta->lq.initial_rate_index[0],
@@ -2928,6 +3091,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2928 .llseek = default_llseek, 3091 .llseek = default_llseek,
2929}; 3092};
2930 3093
3094static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
3095 char __user *user_buf,
3096 size_t count, loff_t *ppos)
3097{
3098 static const char * const column_name[] = {
3099 [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
3100 [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
3101 [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
3102 [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
3103 [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
3104 [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
3105 [RS_COLUMN_MIMO2] = "MIMO2",
3106 [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
3107 };
3108
3109 static const char * const rate_name[] = {
3110 [IWL_RATE_1M_INDEX] = "1M",
3111 [IWL_RATE_2M_INDEX] = "2M",
3112 [IWL_RATE_5M_INDEX] = "5.5M",
3113 [IWL_RATE_11M_INDEX] = "11M",
3114 [IWL_RATE_6M_INDEX] = "6M|MCS0",
3115 [IWL_RATE_9M_INDEX] = "9M",
3116 [IWL_RATE_12M_INDEX] = "12M|MCS1",
3117 [IWL_RATE_18M_INDEX] = "18M|MCS2",
3118 [IWL_RATE_24M_INDEX] = "24M|MCS3",
3119 [IWL_RATE_36M_INDEX] = "36M|MCS4",
3120 [IWL_RATE_48M_INDEX] = "48M|MCS5",
3121 [IWL_RATE_54M_INDEX] = "54M|MCS6",
3122 [IWL_RATE_MCS_7_INDEX] = "MCS7",
3123 [IWL_RATE_MCS_8_INDEX] = "MCS8",
3124 [IWL_RATE_MCS_9_INDEX] = "MCS9",
3125 };
3126
3127 char *buff, *pos, *endpos;
3128 int col, rate;
3129 ssize_t ret;
3130 struct iwl_lq_sta *lq_sta = file->private_data;
3131 struct rs_rate_stats *stats;
3132 static const size_t bufsz = 1024;
3133
3134 buff = kmalloc(bufsz, GFP_KERNEL);
3135 if (!buff)
3136 return -ENOMEM;
3137
3138 pos = buff;
3139 endpos = pos + bufsz;
3140
3141 pos += scnprintf(pos, endpos - pos, "COLUMN,");
3142 for (rate = 0; rate < IWL_RATE_COUNT; rate++)
3143 pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
3144 pos += scnprintf(pos, endpos - pos, "\n");
3145
3146 for (col = 0; col < RS_COLUMN_COUNT; col++) {
3147 pos += scnprintf(pos, endpos - pos,
3148 "%s,", column_name[col]);
3149
3150 for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
3151 stats = &(lq_sta->tx_stats[col][rate]);
3152 pos += scnprintf(pos, endpos - pos,
3153 "%llu/%llu,",
3154 stats->success,
3155 stats->total);
3156 }
3157 pos += scnprintf(pos, endpos - pos, "\n");
3158 }
3159
3160 ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
3161 kfree(buff);
3162 return ret;
3163}
3164
3165static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
3166 const char __user *user_buf,
3167 size_t count, loff_t *ppos)
3168{
3169 struct iwl_lq_sta *lq_sta = file->private_data;
3170 memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
3171
3172 return count;
3173}
3174
3175static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
3176 .read = rs_sta_dbgfs_drv_tx_stats_read,
3177 .write = rs_sta_dbgfs_drv_tx_stats_write,
3178 .open = simple_open,
3179 .llseek = default_llseek,
3180};
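Read together, the pair of handlers gives a reset-able CSV view of the driver-side counters: reading drv_tx_stats yields a header row of rate names followed by one "success/total" cell per rate for each rate-scaling column, and writing anything to the file zeroes lq_sta->tx_stats so a fresh measurement interval can start. The file itself is registered further down in rs_add_debugfs().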
3181
2931static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) 3182static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
2932{ 3183{
2933 struct iwl_lq_sta *lq_sta = mvm_sta; 3184 struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -2937,9 +3188,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
2937 lq_sta->rs_sta_dbgfs_stats_table_file = 3188 lq_sta->rs_sta_dbgfs_stats_table_file =
2938 debugfs_create_file("rate_stats_table", S_IRUSR, dir, 3189 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
2939 lq_sta, &rs_sta_dbgfs_stats_table_ops); 3190 lq_sta, &rs_sta_dbgfs_stats_table_ops);
3191 lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
3192 debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
3193 lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
2940 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = 3194 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2941 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, 3195 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
2942 &lq_sta->tx_agg_tid_en); 3196 &lq_sta->tx_agg_tid_en);
3197 lq_sta->rs_sta_dbgfs_reduced_txp_file =
3198 debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
3199 &lq_sta->dbg_fixed_txp_reduction);
2943} 3200}
2944 3201
2945static void rs_remove_debugfs(void *mvm, void *mvm_sta) 3202static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@ -2947,7 +3204,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
2947 struct iwl_lq_sta *lq_sta = mvm_sta; 3204 struct iwl_lq_sta *lq_sta = mvm_sta;
2948 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); 3205 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2949 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); 3206 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
3207 debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
2950 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); 3208 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
3209 debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
2951} 3210}
2952#endif 3211#endif
2953 3212
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 0acfac96a56c..374a83d7db25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -158,6 +158,13 @@ enum {
158#define RS_SR_FORCE_DECREASE 1920 /* 15% */ 158#define RS_SR_FORCE_DECREASE 1920 /* 15% */
159#define RS_SR_NO_DECREASE 10880 /* 85% */ 159#define RS_SR_NO_DECREASE 10880 /* 85% */
160 160
161#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
162#define TPC_SR_NO_INCREASE 10880 /* 85% */
163#define TPC_TX_POWER_STEP 3
164#define TPC_MAX_REDUCTION 15
165#define TPC_NO_REDUCTION 0
166#define TPC_INVALID 0xff
167
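These thresholds reuse the rate-scaling fixed-point convention of percent x 128 (so 12800 would be 100%), and the reduction index runs from 0 to TPC_MAX_REDUCTION in TPC_TX_POWER_STEP hops, which is why the tpc_win[] array added below is sized TPC_MAX_REDUCTION + 1. A one-line sanity check of the constants:

#include <stdio.h>

#define SR(percent)	((percent) * 128)	/* rs fixed point */

int main(void)
{
	/* prints 9600 10880 1920, matching TPC_SR_FORCE_INCREASE,
	 * TPC_SR_NO_INCREASE and RS_SR_FORCE_DECREASE */
	printf("%d %d %d\n", SR(75), SR(85), SR(15));
	return 0;
}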
161#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 168#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
162#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) 169#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
163#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) 170#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -266,9 +273,16 @@ enum rs_column {
266 RS_COLUMN_MIMO2_SGI, 273 RS_COLUMN_MIMO2_SGI,
267 274
268 RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI, 275 RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
276 RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
269 RS_COLUMN_INVALID, 277 RS_COLUMN_INVALID,
270}; 278};
271 279
280/* Packet stats per rate */
281struct rs_rate_stats {
282 u64 success;
283 u64 total;
284};
285
272/** 286/**
273 * struct iwl_scale_tbl_info -- tx params and success history for all rates 287 * struct iwl_scale_tbl_info -- tx params and success history for all rates
274 * 288 *
@@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
280 enum rs_column column; 294 enum rs_column column;
281 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 295 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
282 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 296 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
297 /* per txpower-reduction history */
298 struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
283}; 299};
284 300
285enum { 301enum {
@@ -315,6 +331,8 @@ struct iwl_lq_sta {
315 bool is_vht; 331 bool is_vht;
316 enum ieee80211_band band; 332 enum ieee80211_band band;
317 333
334 struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
335
318 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 336 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
319 unsigned long active_legacy_rate; 337 unsigned long active_legacy_rate;
320 unsigned long active_siso_rate; 338 unsigned long active_siso_rate;
@@ -334,8 +352,11 @@ struct iwl_lq_sta {
334#ifdef CONFIG_MAC80211_DEBUGFS 352#ifdef CONFIG_MAC80211_DEBUGFS
335 struct dentry *rs_sta_dbgfs_scale_table_file; 353 struct dentry *rs_sta_dbgfs_scale_table_file;
336 struct dentry *rs_sta_dbgfs_stats_table_file; 354 struct dentry *rs_sta_dbgfs_stats_table_file;
355 struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
337 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 356 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
357 struct dentry *rs_sta_dbgfs_reduced_txp_file;
338 u32 dbg_fixed_rate; 358 u32 dbg_fixed_rate;
359 u8 dbg_fixed_txp_reduction;
339#endif 360#endif
340 struct iwl_mvm *drv; 361 struct iwl_mvm *drv;
341 362
@@ -345,6 +366,9 @@ struct iwl_lq_sta {
345 u32 last_rate_n_flags; 366 u32 last_rate_n_flags;
346 /* packets destined for this STA are aggregated */ 367 /* packets destined for this STA are aggregated */
347 u8 is_agg; 368 u8 is_agg;
369
370 /* tx power reduction for this sta */
371 int tpc_reduce;
348}; 372};
349 373
350/* Initialize station's rate scaling information after adding station */ 374/* Initialize station's rate scaling information after adding station */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 6061553a5e44..cf7276967acd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -60,7 +60,6 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/ 61 *****************************************************************************/
62#include "iwl-trans.h" 62#include "iwl-trans.h"
63
64#include "mvm.h" 63#include "mvm.h"
65#include "fw-api.h" 64#include "fw-api.h"
66 65
@@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
130 129
131 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 130 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
132 131
133 ieee80211_rx_ni(mvm->hw, skb); 132 ieee80211_rx(mvm->hw, skb);
134}
135
136static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
137 struct iwl_rx_phy_info *phy_info,
138 struct ieee80211_rx_status *rx_status)
139{
140 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
141 u32 agc_a, agc_b;
142 u32 val;
143
144 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
145 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
146 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
147
148 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
149 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
150 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
151
152 /*
153 * dBm = rssi dB - agc dB - constant.
154 * Higher AGC (higher radio gain) means lower signal.
155 */
156 rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
157 rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
158 max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
159
160 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
161 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
162
163 rx_status->signal = max_rssi_dbm;
164 rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
165 RX_RES_PHY_FLAGS_ANTENNA)
166 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
167 rx_status->chain_signal[0] = rssi_a_dbm;
168 rx_status->chain_signal[1] = rssi_b_dbm;
169} 133}
170 134
171/* 135/*
@@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
337 */ 301 */
338 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ 302 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
339 303
340 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API) 304 iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
341 iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
342 else
343 iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
344 305
345 IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, 306 IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
346 (unsigned long long)rx_status.mactime); 307 (unsigned long long)rx_status.mactime);
@@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
394 rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; 355 rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
395 rx_status.flag |= RX_FLAG_VHT; 356 rx_status.flag |= RX_FLAG_VHT;
396 rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT; 357 rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
358 if (rate_n_flags & RATE_MCS_BF_MSK)
359 rx_status.vht_flag |= RX_VHT_FLAG_BF;
397 } else { 360 } else {
398 rx_status.rate_idx = 361 rx_status.rate_idx =
399 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, 362 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c28de54c75d4..4b6c7d4bd199 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -306,7 +306,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
306 .id = SCAN_REQUEST_CMD, 306 .id = SCAN_REQUEST_CMD,
307 .len = { 0, }, 307 .len = { 0, },
308 .data = { mvm->scan_cmd, }, 308 .data = { mvm->scan_cmd, },
309 .flags = CMD_SYNC,
310 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 309 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
311 }; 310 };
312 struct iwl_scan_cmd *cmd = mvm->scan_cmd; 311 struct iwl_scan_cmd *cmd = mvm->scan_cmd;
@@ -319,7 +318,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
319 struct iwl_mvm_scan_params params = {}; 318 struct iwl_mvm_scan_params params = {};
320 319
321 lockdep_assert_held(&mvm->mutex); 320 lockdep_assert_held(&mvm->mutex);
322 BUG_ON(mvm->scan_cmd == NULL); 321
322 /* we should have failed registration if scan_cmd was NULL */
323 if (WARN_ON(mvm->scan_cmd == NULL))
324 return -ENOMEM;
323 325
324 IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n"); 326 IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
325 mvm->scan_status = IWL_MVM_SCAN_OS; 327 mvm->scan_status = IWL_MVM_SCAN_OS;
@@ -514,7 +516,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
514 ARRAY_SIZE(scan_abort_notif), 516 ARRAY_SIZE(scan_abort_notif),
515 iwl_mvm_scan_abort_notif, NULL); 517 iwl_mvm_scan_abort_notif, NULL);
516 518
517 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); 519 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
518 if (ret) { 520 if (ret) {
519 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); 521 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
520 /* mac80211's state will be cleaned in the nic_restart flow */ 522 /* mac80211's state will be cleaned in the nic_restart flow */
@@ -538,9 +540,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
538 /* scan status must be locked for proper checking */ 540 /* scan status must be locked for proper checking */
539 lockdep_assert_held(&mvm->mutex); 541 lockdep_assert_held(&mvm->mutex);
540 542
541 IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n", 543 IWL_DEBUG_SCAN(mvm,
544 "Scheduled scan completed, status %s EBS status %s:%d\n",
542 scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? 545 scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
543 "completed" : "aborted"); 546 "completed" : "aborted", scan_notif->ebs_status ==
547 IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
548 scan_notif->ebs_status);
549
544 550
545 /* only call mac80211 completion if the stop was initiated by FW */ 551 /* only call mac80211 completion if the stop was initiated by FW */
546 if (mvm->scan_status == IWL_MVM_SCAN_SCHED) { 552 if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@ -548,6 +554,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
548 ieee80211_sched_scan_stopped(mvm->hw); 554 ieee80211_sched_scan_stopped(mvm->hw);
549 } 555 }
550 556
557 mvm->last_ebs_successful = !scan_notif->ebs_status;
558
551 return 0; 559 return 0;
552} 560}
553 561
@@ -740,7 +748,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
740 struct iwl_scan_offload_cfg *scan_cfg; 748 struct iwl_scan_offload_cfg *scan_cfg;
741 struct iwl_host_cmd cmd = { 749 struct iwl_host_cmd cmd = {
742 .id = SCAN_OFFLOAD_CONFIG_CMD, 750 .id = SCAN_OFFLOAD_CONFIG_CMD,
743 .flags = CMD_SYNC,
744 }; 751 };
745 struct iwl_mvm_scan_params params = {}; 752 struct iwl_mvm_scan_params params = {};
746 753
@@ -798,7 +805,6 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
798 struct iwl_scan_offload_blacklist *blacklist; 805 struct iwl_scan_offload_blacklist *blacklist;
799 struct iwl_host_cmd cmd = { 806 struct iwl_host_cmd cmd = {
800 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, 807 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
801 .flags = CMD_SYNC,
802 .len[1] = sizeof(*profile_cfg), 808 .len[1] = sizeof(*profile_cfg),
803 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 809 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
804 .dataflags[1] = IWL_HCMD_DFL_NOCOPY, 810 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
@@ -884,7 +890,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
884 scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL); 890 scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
885 } 891 }
886 892
887 return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC, 893 if (mvm->last_ebs_successful &&
894 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
895 scan_req.flags |=
896 cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
897
898 return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
888 sizeof(scan_req), &scan_req); 899 sizeof(scan_req), &scan_req);
889} 900}
890 901
@@ -893,7 +904,6 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
893 int ret; 904 int ret;
894 struct iwl_host_cmd cmd = { 905 struct iwl_host_cmd cmd = {
895 .id = SCAN_OFFLOAD_ABORT_CMD, 906 .id = SCAN_OFFLOAD_ABORT_CMD,
896 .flags = CMD_SYNC,
897 }; 907 };
898 u32 status; 908 u32 status;
899 909
@@ -922,7 +932,7 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
922 return ret; 932 return ret;
923} 933}
924 934
925int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm) 935int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
926{ 936{
927 int ret; 937 int ret;
928 struct iwl_notification_wait wait_scan_done; 938 struct iwl_notification_wait wait_scan_done;
@@ -960,5 +970,8 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
960 */ 970 */
961 mvm->scan_status = IWL_MVM_SCAN_NONE; 971 mvm->scan_status = IWL_MVM_SCAN_NONE;
962 972
973 if (notify)
974 ieee80211_sched_scan_stopped(mvm->hw);
975
963 return 0; 976 return 0;
964} 977}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 88809b2d1654..7edfd15efc9d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
237 .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT, 237 .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
238 }; 238 };
239 239
240 if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
241 return 0;
242
243 /* 240 /*
244 * Ignore the call if we are in HW Restart flow, or if the handled 241 * Ignore the call if we are in HW Restart flow, or if the handled
245 * vif is a p2p device. 242 * vif is a p2p device.
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index f339ef884250..1fb01ea2e704 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,115 +66,6 @@
66#include "sta.h" 66#include "sta.h"
67#include "rs.h" 67#include "rs.h"
68 68
69static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
70 struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
71{
72 memset(cmd_v5, 0, sizeof(*cmd_v5));
73
74 cmd_v5->add_modify = cmd_v7->add_modify;
75 cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
76 cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
77 memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
78 cmd_v5->sta_id = cmd_v7->sta_id;
79 cmd_v5->modify_mask = cmd_v7->modify_mask;
80 cmd_v5->station_flags = cmd_v7->station_flags;
81 cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
82 cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
83 cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
84 cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
85 cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
86 cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
87 cmd_v5->assoc_id = cmd_v7->assoc_id;
88 cmd_v5->beamform_flags = cmd_v7->beamform_flags;
89 cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
90}
91
92static void
93iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
94 struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
95 u32 mac_id_n_color)
96{
97 memset(sta_cmd, 0, sizeof(*sta_cmd));
98
99 sta_cmd->sta_id = key_cmd->sta_id;
100 sta_cmd->add_modify = STA_MODE_MODIFY;
101 sta_cmd->modify_mask = STA_MODIFY_KEY;
102 sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
103
104 sta_cmd->key.key_offset = key_cmd->key_offset;
105 sta_cmd->key.key_flags = key_cmd->key_flags;
106 memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
107 sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
108 memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
109 sizeof(sta_cmd->key.tkip_rx_ttak));
110}
111
112static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
113 struct iwl_mvm_add_sta_cmd_v7 *cmd,
114 int *status)
115{
116 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
117
118 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
119 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
120 cmd, status);
121
122 iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
123
124 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
125 &cmd_v5, status);
126}
127
128static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
129 struct iwl_mvm_add_sta_cmd_v7 *cmd)
130{
131 struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
132
133 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
134 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
135 sizeof(*cmd), cmd);
136
137 iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
138
139 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
140 &cmd_v5);
141}
142
143static int
144iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
145 struct iwl_mvm_add_sta_key_cmd *cmd,
146 u32 mac_id_n_color,
147 int *status)
148{
149 struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
150
151 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
152 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
153 sizeof(*cmd), cmd, status);
154
155 iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
156
157 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
158 &sta_cmd, status);
159}
160
161static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
162 u32 flags,
163 struct iwl_mvm_add_sta_key_cmd *cmd,
164 u32 mac_id_n_color)
165{
166 struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
167
168 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
169 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
170 sizeof(*cmd), cmd);
171
172 iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
173
174 return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
175 &sta_cmd);
176}
177
178static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, 69static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
179 enum nl80211_iftype iftype) 70 enum nl80211_iftype iftype)
180{ 71{
@@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
207 bool update) 98 bool update)
208{ 99{
209 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 100 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
210 struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd; 101 struct iwl_mvm_add_sta_cmd add_sta_cmd;
211 int ret; 102 int ret;
212 u32 status; 103 u32 status;
213 u32 agg_size = 0, mpdu_dens = 0; 104 u32 agg_size = 0, mpdu_dens = 0;
@@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
295 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); 186 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
296 187
297 status = ADD_STA_SUCCESS; 188 status = ADD_STA_SUCCESS;
298 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status); 189 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
190 &add_sta_cmd, &status);
299 if (ret) 191 if (ret)
300 return ret; 192 return ret;
301 193
@@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
380int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 272int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
381 bool drain) 273 bool drain)
382{ 274{
383 struct iwl_mvm_add_sta_cmd_v7 cmd = {}; 275 struct iwl_mvm_add_sta_cmd cmd = {};
384 int ret; 276 int ret;
385 u32 status; 277 u32 status;
386 278
@@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
393 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); 285 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
394 286
395 status = ADD_STA_SUCCESS; 287 status = ADD_STA_SUCCESS;
396 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 288 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
289 &cmd, &status);
397 if (ret) 290 if (ret)
398 return ret; 291 return ret;
399 292
@@ -434,7 +327,7 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
434 return -EINVAL; 327 return -EINVAL;
435 } 328 }
436 329
437 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC, 330 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
438 sizeof(rm_sta_cmd), &rm_sta_cmd); 331 sizeof(rm_sta_cmd), &rm_sta_cmd);
439 if (ret) { 332 if (ret) {
440 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); 333 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
@@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
498 sta_id); 391 sta_id);
499 continue; 392 continue;
500 } 393 }
501 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL); 394 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
502 clear_bit(sta_id, mvm->sta_drained); 395 clear_bit(sta_id, mvm->sta_drained);
503 } 396 }
504 397
@@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
520 /* flush its queues here since we are freeing mvm_sta */ 413 /* flush its queues here since we are freeing mvm_sta */
521 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true); 414 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
522 415
523 /*
524 * Put a non-NULL since the fw station isn't removed.
525 * It will be removed after the MAC will be set as
526 * unassoc.
527 */
528 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
529 ERR_PTR(-EINVAL));
530
531 /* if we are associated - we can't remove the AP STA now */ 416 /* if we are associated - we can't remove the AP STA now */
532 if (vif->bss_conf.assoc) 417 if (vif->bss_conf.assoc)
533 return ret; 418 return ret;
@@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
557 } else { 442 } else {
558 spin_unlock_bh(&mvm_sta->lock); 443 spin_unlock_bh(&mvm_sta->lock);
559 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); 444 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
560 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); 445 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
561 } 446 }
562 447
563 return ret; 448 return ret;
@@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
571 456
572 lockdep_assert_held(&mvm->mutex); 457 lockdep_assert_held(&mvm->mutex);
573 458
574 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL); 459 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
575 return ret; 460 return ret;
576} 461}
577 462
@@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
593 478
594void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) 479void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
595{ 480{
596 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL); 481 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
597 memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); 482 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
598 sta->sta_id = IWL_MVM_STATION_COUNT; 483 sta->sta_id = IWL_MVM_STATION_COUNT;
599} 484}
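The sta.c hunks in this region also switch every NULL store into fw_id_to_mac_id[] from rcu_assign_pointer() to RCU_INIT_POINTER(). The distinction is memory ordering: rcu_assign_pointer() issues a release barrier so that a reader which observes the new pointer also observes the pointee's initialized fields, while a NULL store publishes nothing to order against and may skip the barrier. A userspace-flavoured sketch of the difference using compiler atomics (illustrative analogue, not the kernel macros):

#include <stdio.h>
#include <stddef.h>

#define NUM_SLOTS 16

struct sta { int id; };

static struct sta *fw_id_to_mac_id[NUM_SLOTS];

/* rcu_assign_pointer() analogue: release-order the store so the
 * object's fields are visible before the pointer is */
static void publish(int slot, struct sta *sta)
{
	__atomic_store_n(&fw_id_to_mac_id[slot], sta, __ATOMIC_RELEASE);
}

/* RCU_INIT_POINTER() analogue: NULL carries no payload, so a
 * relaxed store suffices */
static void unpublish(int slot)
{
	__atomic_store_n(&fw_id_to_mac_id[slot], (struct sta *)NULL,
			 __ATOMIC_RELAXED);
}

int main(void)
{
	static struct sta s = { .id = 7 };

	publish(3, &s);
	unpublish(3);
	printf("slot 3 = %p\n", (void *)fw_id_to_mac_id[3]);
	return 0;
}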
@@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
603 const u8 *addr, 488 const u8 *addr,
604 u16 mac_id, u16 color) 489 u16 mac_id, u16 color)
605{ 490{
606 struct iwl_mvm_add_sta_cmd_v7 cmd; 491 struct iwl_mvm_add_sta_cmd cmd;
607 int ret; 492 int ret;
608 u32 status; 493 u32 status;
609 494
610 lockdep_assert_held(&mvm->mutex); 495 lockdep_assert_held(&mvm->mutex);
611 496
612 memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7)); 497 memset(&cmd, 0, sizeof(cmd));
613 cmd.sta_id = sta->sta_id; 498 cmd.sta_id = sta->sta_id;
614 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, 499 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
615 color)); 500 color));
@@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
619 if (addr) 504 if (addr)
620 memcpy(cmd.addr, addr, ETH_ALEN); 505 memcpy(cmd.addr, addr, ETH_ALEN);
621 506
622 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 507 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
508 &cmd, &status);
623 if (ret) 509 if (ret)
624 return ret; 510 return ret;
625 511
@@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
753 int tid, u16 ssn, bool start) 639 int tid, u16 ssn, bool start)
754{ 640{
755 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 641 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
756 struct iwl_mvm_add_sta_cmd_v7 cmd = {}; 642 struct iwl_mvm_add_sta_cmd cmd = {};
757 int ret; 643 int ret;
758 u32 status; 644 u32 status;
759 645
@@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
777 STA_MODIFY_REMOVE_BA_TID; 663 STA_MODIFY_REMOVE_BA_TID;
778 664
779 status = ADD_STA_SUCCESS; 665 status = ADD_STA_SUCCESS;
780 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 666 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
667 &cmd, &status);
781 if (ret) 668 if (ret)
782 return ret; 669 return ret;
783 670
@@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
812 int tid, u8 queue, bool start) 699 int tid, u8 queue, bool start)
813{ 700{
814 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 701 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
815 struct iwl_mvm_add_sta_cmd_v7 cmd = {}; 702 struct iwl_mvm_add_sta_cmd cmd = {};
816 int ret; 703 int ret;
817 u32 status; 704 u32 status;
818 705
@@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
834 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 721 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
835 722
836 status = ADD_STA_SUCCESS; 723 status = ADD_STA_SUCCESS;
837 ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status); 724 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
725 &cmd, &status);
838 if (ret) 726 if (ret)
839 return ret; 727 return ret;
840 728
@@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1129 u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, 1017 u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
1130 u32 cmd_flags) 1018 u32 cmd_flags)
1131{ 1019{
1132 __le16 key_flags;
1133 struct iwl_mvm_add_sta_key_cmd cmd = {}; 1020 struct iwl_mvm_add_sta_key_cmd cmd = {};
1021 __le16 key_flags;
1134 int ret, status; 1022 int ret, status;
1135 u16 keyidx; 1023 u16 keyidx;
1136 int i; 1024 int i;
1137 u32 mac_id_n_color = mvm_sta->mac_id_n_color;
1138 1025
1139 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 1026 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1140 STA_KEY_FLG_KEYID_MSK; 1027 STA_KEY_FLG_KEYID_MSK;
@@ -1166,13 +1053,12 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1166 cmd.sta_id = sta_id; 1053 cmd.sta_id = sta_id;
1167 1054
1168 status = ADD_STA_SUCCESS; 1055 status = ADD_STA_SUCCESS;
1169 if (cmd_flags == CMD_SYNC) 1056 if (cmd_flags & CMD_ASYNC)
1170 ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd, 1057 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
1171 mac_id_n_color, 1058 sizeof(cmd), &cmd);
1172 &status);
1173 else 1059 else
1174 ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd, 1060 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1175 mac_id_n_color); 1061 &cmd, &status);
1176 1062
1177 switch (status) { 1063 switch (status) {
1178 case ADD_STA_SUCCESS: 1064 case ADD_STA_SUCCESS:
@@ -1225,7 +1111,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
1225 remove_key ? "removing" : "installing", 1111 remove_key ? "removing" : "installing",
1226 igtk_cmd.sta_id); 1112 igtk_cmd.sta_id);
1227 1113
1228 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC, 1114 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
1229 sizeof(igtk_cmd), &igtk_cmd); 1115 sizeof(igtk_cmd), &igtk_cmd);
1230} 1116}
1231 1117
@@ -1312,15 +1198,15 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1312 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 1198 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1313 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 1199 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1314 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, 1200 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
1315 seq.tkip.iv32, p1k, CMD_SYNC); 1201 seq.tkip.iv32, p1k, 0);
1316 break; 1202 break;
1317 case WLAN_CIPHER_SUITE_CCMP: 1203 case WLAN_CIPHER_SUITE_CCMP:
1318 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, 1204 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
1319 0, NULL, CMD_SYNC); 1205 0, NULL, 0);
1320 break; 1206 break;
1321 default: 1207 default:
1322 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, 1208 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
1323 sta_id, 0, NULL, CMD_SYNC); 1209 sta_id, 0, NULL, 0);
1324 } 1210 }
1325 1211
1326 if (ret) 1212 if (ret)
@@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1399 cmd.sta_id = sta_id; 1285 cmd.sta_id = sta_id;
1400 1286
1401 status = ADD_STA_SUCCESS; 1287 status = ADD_STA_SUCCESS;
1402 ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd, 1288 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1403 mvm_sta->mac_id_n_color, 1289 &cmd, &status);
1404 &status);
1405 1290
1406 switch (status) { 1291 switch (status) {
1407 case ADD_STA_SUCCESS: 1292 case ADD_STA_SUCCESS:
@@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1448 struct ieee80211_sta *sta) 1333 struct ieee80211_sta *sta)
1449{ 1334{
1450 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1335 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1451 struct iwl_mvm_add_sta_cmd_v7 cmd = { 1336 struct iwl_mvm_add_sta_cmd cmd = {
1452 .add_modify = STA_MODE_MODIFY, 1337 .add_modify = STA_MODE_MODIFY,
1453 .sta_id = mvmsta->sta_id, 1338 .sta_id = mvmsta->sta_id,
1454 .station_flags_msk = cpu_to_le32(STA_FLG_PS), 1339 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1456 }; 1341 };
1457 int ret; 1342 int ret;
1458 1343
1459 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd); 1344 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1460 if (ret) 1345 if (ret)
1461 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1346 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1462} 1347}
@@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1468 bool agg) 1353 bool agg)
1469{ 1354{
1470 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1355 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1471 struct iwl_mvm_add_sta_cmd_v7 cmd = { 1356 struct iwl_mvm_add_sta_cmd cmd = {
1472 .add_modify = STA_MODE_MODIFY, 1357 .add_modify = STA_MODE_MODIFY,
1473 .sta_id = mvmsta->sta_id, 1358 .sta_id = mvmsta->sta_id,
1474 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 1359 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1538 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD); 1423 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
1539 } 1424 }
1540 1425
1541 ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd); 1426 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1542 if (ret) 1427 if (ret)
1543 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1428 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1544} 1429}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 2ed84c421481..d98e8a2142b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
253 * This is basically (last acked packet++). 253 * This is basically (last acked packet++).
254 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the 254 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
255 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 255 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
256 * @reduced_tpc: Reduced tx power. Holds the data between the
257 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
256 * @state: state of the BA agreement establishment / tear down. 258 * @state: state of the BA agreement establishment / tear down.
257 * @txq_id: Tx queue used by the BA session 259 * @txq_id: Tx queue used by the BA session
258 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or 260 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
265 u16 next_reclaimed; 267 u16 next_reclaimed;
266 /* The rest is Tx AGG related */ 268 /* The rest is Tx AGG related */
267 u32 rate_n_flags; 269 u32 rate_n_flags;
270 u8 reduced_tpc;
268 enum iwl_mvm_agg_state state; 271 enum iwl_mvm_agg_state state;
269 u16 txq_id; 272 u16 txq_id;
270 u16 ssn; 273 u16 ssn;
@@ -284,8 +287,6 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
284 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for 287 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
285 * tid. 288 * tid.
286 * @max_agg_bufsize: the maximal size of the AGG buffer for this station 289 * @max_agg_bufsize: the maximal size of the AGG buffer for this station
287 * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
288 * by debugfs.
289 * @bt_reduced_txpower: is reduced tx power enabled for this station 290 * @bt_reduced_txpower: is reduced tx power enabled for this station
290 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and 291 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
291 * we need to signal the EOSP 292 * we need to signal the EOSP
@@ -306,7 +307,6 @@ struct iwl_mvm_sta {
306 u32 mac_id_n_color; 307 u32 mac_id_n_color;
307 u16 tid_disable_agg; 308 u16 tid_disable_agg;
308 u8 max_agg_bufsize; 309 u8 max_agg_bufsize;
309 bool bt_reduced_txpower_dbg;
310 bool bt_reduced_txpower; 310 bool bt_reduced_txpower;
311 bool next_status_eosp; 311 bool next_status_eosp;
312 spinlock_t lock; 312 spinlock_t lock;
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 61331245ad93..80100f6cc12a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
273 return true; 273 return true;
274} 274}
275 275
276/* used to convert from time event API v2 to v1 */
277#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
278 TE_V2_EVENT_SOCIOPATHIC)
279static inline u16 te_v2_get_notify(__le16 policy)
280{
281 return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
282}
283
284static inline u16 te_v2_get_dep_policy(__le16 policy)
285{
286 return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
287 TE_V2_PLACEMENT_POS;
288}
289
290static inline u16 te_v2_get_absence(__le16 policy)
291{
292 return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
293}
294
295static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
296 struct iwl_time_event_cmd_v1 *cmd_v1)
297{
298 cmd_v1->id_and_color = cmd_v2->id_and_color;
299 cmd_v1->action = cmd_v2->action;
300 cmd_v1->id = cmd_v2->id;
301 cmd_v1->apply_time = cmd_v2->apply_time;
302 cmd_v1->max_delay = cmd_v2->max_delay;
303 cmd_v1->depends_on = cmd_v2->depends_on;
304 cmd_v1->interval = cmd_v2->interval;
305 cmd_v1->duration = cmd_v2->duration;
306 if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
307 cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
308 else
309 cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
310 cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
311 cmd_v1->interval_reciprocal = 0; /* unused */
312
313 cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
314 cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
315 cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
316}
317
318static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
319 const struct iwl_time_event_cmd_v2 *cmd)
320{
321 struct iwl_time_event_cmd_v1 cmd_v1;
322
323 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
324 return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
325 sizeof(*cmd), cmd);
326
327 iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
328 return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
329 sizeof(cmd_v1), &cmd_v1);
330}
331
332
333static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, 276static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
334 struct ieee80211_vif *vif, 277 struct ieee80211_vif *vif,
335 struct iwl_mvm_time_event_data *te_data, 278 struct iwl_mvm_time_event_data *te_data,
336 struct iwl_time_event_cmd_v2 *te_cmd) 279 struct iwl_time_event_cmd *te_cmd)
337{ 280{
338 static const u8 time_event_response[] = { TIME_EVENT_CMD }; 281 static const u8 time_event_response[] = { TIME_EVENT_CMD };
339 struct iwl_notification_wait wait_time_event; 282 struct iwl_notification_wait wait_time_event;
@@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
369 ARRAY_SIZE(time_event_response), 312 ARRAY_SIZE(time_event_response),
370 iwl_mvm_time_event_response, te_data); 313 iwl_mvm_time_event_response, te_data);
371 314
372 ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd); 315 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
316 sizeof(*te_cmd), te_cmd);
373 if (ret) { 317 if (ret) {
374 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); 318 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
375 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 319 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
397{ 341{
398 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 342 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
399 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 343 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
400 struct iwl_time_event_cmd_v2 time_cmd = {}; 344 struct iwl_time_event_cmd time_cmd = {};
401 345
402 lockdep_assert_held(&mvm->mutex); 346 lockdep_assert_held(&mvm->mutex);
403 347
@@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
453 struct iwl_mvm_vif *mvmvif, 397 struct iwl_mvm_vif *mvmvif,
454 struct iwl_mvm_time_event_data *te_data) 398 struct iwl_mvm_time_event_data *te_data)
455{ 399{
456 struct iwl_time_event_cmd_v2 time_cmd = {}; 400 struct iwl_time_event_cmd time_cmd = {};
457 u32 id, uid; 401 u32 id, uid;
458 int ret; 402 int ret;
459 403
@@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
490 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 434 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
491 435
492 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); 436 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
493 ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd); 437 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
438 sizeof(time_cmd), &time_cmd);
494 if (WARN_ON(ret)) 439 if (WARN_ON(ret))
495 return; 440 return;
496} 441}
@@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
510{ 455{
511 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 456 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
512 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 457 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
513 struct iwl_time_event_cmd_v2 time_cmd = {}; 458 struct iwl_time_event_cmd time_cmd = {};
514 459
515 lockdep_assert_held(&mvm->mutex); 460 lockdep_assert_held(&mvm->mutex);
516 if (te_data->running) { 461 if (te_data->running) {
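One thing the time-event.c diff above deletes outright is a compatibility layer: commands were built in the v2 layout and, when the firmware lacked IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2, down-converted by unpacking the packed policy word into separate v1 fields. A minimal standalone sketch of that unpacking shape, with hypothetical mask values standing in for the real TE_V2_* constants:

    #include <stdint.h>
    #include <stdio.h>

    #define NOTIF_MSK   0x00ff  /* hypothetical stand-in for TE_V2_NOTIF_MSK */
    #define DEP_MSK     0x0700  /* hypothetical dependency-policy bits */
    #define DEP_POS     8
    #define ABSENCE_BIT 0x0800  /* hypothetical stand-in for TE_V2_ABSENCE */
    #define ABSENCE_POS 11

    struct cmd_v1 { uint32_t notify, dep_policy, is_present; };

    /* v2 packs everything into one policy word; v1 wants separate fields */
    static void policy_v2_to_v1(uint16_t policy, struct cmd_v1 *v1)
    {
        v1->notify     = policy & NOTIF_MSK;
        v1->dep_policy = (policy & DEP_MSK) >> DEP_POS;
        v1->is_present = !((policy & ABSENCE_BIT) >> ABSENCE_POS);
    }

    int main(void)
    {
        struct cmd_v1 v1;

        policy_v2_to_v1(0x0905, &v1);
        printf("notify=%u dep=%u present=%u\n",
               v1.notify, v1.dep_policy, v1.is_present);
        return 0;
    }

With v1-only firmware out of the support matrix, the v2 struct is simply renamed to iwl_time_event_cmd and sent as-is, which is what the later hunks in this file show.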
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 7a99fa361954..868561512783 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -409,7 +409,6 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
409 .id = REPLY_THERMAL_MNG_BACKOFF, 409 .id = REPLY_THERMAL_MNG_BACKOFF,
410 .len = { sizeof(u32), }, 410 .len = { sizeof(u32), },
411 .data = { &backoff, }, 411 .data = { &backoff, },
412 .flags = CMD_SYNC,
413 }; 412 };
414 413
415 backoff = max(backoff, mvm->thermal_throttle.min_backoff); 414 backoff = max(backoff, mvm->thermal_throttle.min_backoff);
@@ -468,13 +467,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
468 } 467 }
469 468
470 if (params->support_tx_backoff) { 469 if (params->support_tx_backoff) {
471 tx_backoff = 0; 470 tx_backoff = tt->min_backoff;
472 for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { 471 for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
473 if (temperature < params->tx_backoff[i].temperature) 472 if (temperature < params->tx_backoff[i].temperature)
474 break; 473 break;
475 tx_backoff = params->tx_backoff[i].backoff; 474 tx_backoff = max(tt->min_backoff,
475 params->tx_backoff[i].backoff);
476 } 476 }
477 if (tx_backoff != 0) 477 if (tx_backoff != tt->min_backoff)
478 throttle_enable = true; 478 throttle_enable = true;
479 if (tt->tx_backoff != tx_backoff) 479 if (tt->tx_backoff != tx_backoff)
480 iwl_mvm_tt_tx_backoff(mvm, tx_backoff); 480 iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
@@ -484,7 +484,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
484 IWL_WARN(mvm, 484 IWL_WARN(mvm,
485 "Due to high temperature thermal throttling initiated\n"); 485 "Due to high temperature thermal throttling initiated\n");
486 tt->throttle = true; 486 tt->throttle = true;
487 } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 && 487 } else if (tt->throttle && !tt->dynamic_smps &&
488 tt->tx_backoff == tt->min_backoff &&
488 temperature <= params->tx_protection_exit) { 489 temperature <= params->tx_protection_exit) {
489 IWL_WARN(mvm, 490 IWL_WARN(mvm,
490 "Temperature is back to normal thermal throttling stopped\n"); 491 "Temperature is back to normal thermal throttling stopped\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 879aeac46cc1..3846a6c41eb1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
636 seq_ctl = le16_to_cpu(hdr->seq_ctrl); 636 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
637 } 637 }
638 638
639 ieee80211_tx_status_ni(mvm->hw, skb); 639 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
640 info->status.status_driver_data[0] =
641 (void *)(uintptr_t)tx_resp->reduced_tpc;
642
643 ieee80211_tx_status(mvm->hw, skb);
640 } 644 }
641 645
642 if (txq_id >= mvm->first_agg_queue) { 646 if (txq_id >= mvm->first_agg_queue) {
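The tx.c hunk above smuggles the firmware's reduced_tpc byte to mac80211 through the opaque status_driver_data slot by casting through uintptr_t, which keeps the integer-to-pointer round trip well defined; the aggregation path stashes the same byte per TID (the sta.h hunk earlier) and replays it from the block-ack notification. A tiny self-contained illustration of the idiom:

    #include <assert.h>
    #include <stdint.h>

    static void *encode_tpc(uint8_t reduced_tpc)
    {
        /* widen through uintptr_t so the cast to a pointer is well defined */
        return (void *)(uintptr_t)reduced_tpc;
    }

    static uint8_t decode_tpc(void *slot)
    {
        return (uint8_t)(uintptr_t)slot;
    }

    int main(void)
    {
        void *slot = encode_tpc(42);

        assert(decode_tpc(slot) == 42);
        return 0;
    }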
@@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
815 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 819 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
816 mvmsta->tid_data[tid].rate_n_flags = 820 mvmsta->tid_data[tid].rate_n_flags =
817 le32_to_cpu(tx_resp->initial_rate); 821 le32_to_cpu(tx_resp->initial_rate);
822 mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
818 } 823 }
819 824
820 rcu_read_unlock(); 825 rcu_read_unlock();
@@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
928 info->status.ampdu_len = ba_notif->txed; 933 info->status.ampdu_len = ba_notif->txed;
929 iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, 934 iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
930 info); 935 info);
936 info->status.status_driver_data[0] =
937 (void *)(uintptr_t)tid_data->reduced_tpc;
931 } 938 }
932 } 939 }
933 940
@@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
937 944
938 while (!skb_queue_empty(&reclaimed_skbs)) { 945 while (!skb_queue_empty(&reclaimed_skbs)) {
939 skb = __skb_dequeue(&reclaimed_skbs); 946 skb = __skb_dequeue(&reclaimed_skbs);
940 ieee80211_tx_status_ni(mvm->hw, skb); 947 ieee80211_tx_status(mvm->hw, skb);
941 } 948 }
942 949
943 return 0; 950 return 0;
@@ -951,7 +958,7 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
951 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), 958 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
952 }; 959 };
953 960
954 u32 flags = sync ? CMD_SYNC : CMD_ASYNC; 961 u32 flags = sync ? 0 : CMD_ASYNC;
955 962
956 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags, 963 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
957 sizeof(flush_cmd), &flush_cmd); 964 sizeof(flush_cmd), &flush_cmd);
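This flush hunk shows the flag convention used throughout the series: CMD_SYNC is gone, synchronous sending is simply flags == 0, and CMD_ASYNC remains the explicit opt-out. A hedged sketch of the convention (the flag values here are hypothetical, not the driver's):

    #include <stdio.h>

    enum cmd_flags {
        CMD_ASYNC    = 1 << 0,   /* don't block; completion via handler */
        CMD_WANT_SKB = 1 << 1,   /* caller wants the response buffer */
    };

    static void send_cmd_pdu(unsigned flags)
    {
        if (flags & CMD_ASYNC)
            printf("queued, completion via handler\n");
        else
            printf("blocking until the firmware replies\n");
    }

    int main(void)
    {
        int sync = 1;

        send_cmd_pdu(sync ? 0 : CMD_ASYNC);  /* mirrors the flush hunk */
        return 0;
    }

Making zero mean "synchronous" lets every call site drop a flag instead of adding one, which is why most hunks in this series shrink.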
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 2180902266ae..aa9fc77e8413 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -64,6 +64,7 @@
64 64
65#include "iwl-debug.h" 65#include "iwl-debug.h"
66#include "iwl-io.h" 66#include "iwl-io.h"
67#include "iwl-prph.h"
67 68
68#include "mvm.h" 69#include "mvm.h"
69#include "fw-api-rs.h" 70#include "fw-api-rs.h"
@@ -143,7 +144,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
143 "cmd flags %x", cmd->flags)) 144 "cmd flags %x", cmd->flags))
144 return -EINVAL; 145 return -EINVAL;
145 146
146 cmd->flags |= CMD_SYNC | CMD_WANT_SKB; 147 cmd->flags |= CMD_WANT_SKB;
147 148
148 ret = iwl_trans_send_cmd(mvm->trans, cmd); 149 ret = iwl_trans_send_cmd(mvm->trans, cmd);
149 if (ret == -ERFKILL) { 150 if (ret == -ERFKILL) {
@@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
469 mvm->status, table.valid); 470 mvm->status, table.valid);
470 } 471 }
471 472
473 /* Do not change this output - scripts rely on it */
474
472 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); 475 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
473 476
474 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, 477 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@ -516,13 +519,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
516 iwl_mvm_dump_umac_error_log(mvm); 519 iwl_mvm_dump_umac_error_log(mvm);
517} 520}
518 521
522#ifdef CONFIG_IWLWIFI_DEBUGFS
519void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm) 523void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
520{ 524{
521 const struct fw_img *img; 525 const struct fw_img *img;
522 u32 ofs, sram_len; 526 u32 ofs, sram_len;
523 void *sram; 527 void *sram;
524 528
525 if (!mvm->ucode_loaded || mvm->fw_error_sram) 529 if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
526 return; 530 return;
527 531
528 img = &mvm->fw->img[mvm->cur_ucode]; 532 img = &mvm->fw->img[mvm->cur_ucode];
@@ -538,6 +542,48 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
538 mvm->fw_error_sram_len = sram_len; 542 mvm->fw_error_sram_len = sram_len;
539} 543}
540 544
545void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
546{
547 int i, reg_val;
548 unsigned long flags;
549
550 if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
551 return;
552
553 /* reading buffer size */
554 reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
555 mvm->fw_error_rxf_len =
556 (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
557
558 /* the register holds the value divided by 128 */
559 mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
560
561 if (!mvm->fw_error_rxf_len)
562 return;
563
564 mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
565 if (!mvm->fw_error_rxf) {
566 mvm->fw_error_rxf_len = 0;
567 return;
568 }
569
570 if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
571 kfree(mvm->fw_error_rxf);
572 mvm->fw_error_rxf = NULL;
573 mvm->fw_error_rxf_len = 0;
574 return;
575 }
576
577 for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
578 iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
579 i * sizeof(u32));
580 mvm->fw_error_rxf[i] =
581 iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
582 }
583 iwl_trans_release_nic_access(mvm->trans, &flags);
584}
585#endif
586
541/** 587/**
542 * iwl_mvm_send_lq_cmd() - Send link quality command 588 * iwl_mvm_send_lq_cmd() - Send link quality command
543 * @init: This command is sent as part of station initialization right 589 * @init: This command is sent as part of station initialization right
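The new RXF dump above decodes the FIFO length from a register that stores the byte count divided by 128 in a masked field, hence the mask/shift followed by << 7. A standalone sketch of the decode, with made-up mask and shift values standing in for RXF_SIZE_BYTE_CNT_MSK and RXF_SIZE_BYTE_CND_POS:

    #include <stdio.h>

    #define RXF_SIZE_BYTE_CNT_MSK 0x3ff00  /* hypothetical */
    #define RXF_SIZE_BYTE_CND_POS 8        /* hypothetical */

    static unsigned rxf_len_bytes(unsigned reg_val)
    {
        unsigned units = (reg_val & RXF_SIZE_BYTE_CNT_MSK)
                         >> RXF_SIZE_BYTE_CND_POS;

        return units << 7;  /* x128: the register holds size / 128 */
    }

    int main(void)
    {
        printf("%u\n", rxf_len_bytes(0x00400));  /* 4 * 128 = 512 */
        return 0;
    }

A zero-length result is treated as "nothing to dump", which is why the function bails out before allocating.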
@@ -553,7 +599,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
553 struct iwl_host_cmd cmd = { 599 struct iwl_host_cmd cmd = {
554 .id = LQ_CMD, 600 .id = LQ_CMD,
555 .len = { sizeof(struct iwl_lq_cmd), }, 601 .len = { sizeof(struct iwl_lq_cmd), },
556 .flags = init ? CMD_SYNC : CMD_ASYNC, 602 .flags = init ? 0 : CMD_ASYNC,
557 .data = { lq, }, 603 .data = { lq, },
558 }; 604 };
559 605
@@ -604,6 +650,39 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
604 ieee80211_request_smps(vif, smps_mode); 650 ieee80211_request_smps(vif, smps_mode);
605} 651}
606 652
653static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
654 struct ieee80211_vif *vif)
655{
656 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
657 bool *result = _data;
658 int i;
659
660 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
661 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
662 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
663 *result = false;
664 }
665}
666
667bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
668{
669 bool result = true;
670
671 lockdep_assert_held(&mvm->mutex);
672
673 if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
674 return false;
675
676 if (!mvm->cfg->rx_with_siso_diversity)
677 return false;
678
679 ieee80211_iterate_active_interfaces_atomic(
680 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
681 iwl_mvm_diversity_iter, &result);
682
683 return result;
684}
685
607int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 686int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
608 bool value) 687 bool value)
609{ 688{
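iwl_mvm_rx_diversity_allowed above uses a common mac80211 idiom: start from an optimistic result and let a per-interface callback only veto it, never re-enable it, so the outcome is independent of iteration order. A simplified runnable model of that accumulator pattern (not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    struct vif { int smps; };  /* 0 = off, 1 = static, 2 = dynamic */

    static void diversity_iter(void *data, const struct vif *vif)
    {
        bool *result = data;

        /* a callback may only clear the shared result, never set it */
        if (vif->smps == 1 || vif->smps == 2)
            *result = false;
    }

    int main(void)
    {
        struct vif vifs[] = { { 0 }, { 2 }, { 0 } };
        bool allowed = true;   /* start optimistic, as the driver does */
        unsigned i;

        for (i = 0; i < sizeof(vifs) / sizeof(vifs[0]); i++)
            diversity_iter(&allowed, &vifs[i]);

        printf("RX diversity %s\n", allowed ? "allowed" : "blocked");
        return 0;
    }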
@@ -623,7 +702,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
623 702
624 iwl_mvm_bt_coex_vif_change(mvm); 703 iwl_mvm_bt_coex_vif_change(mvm);
625 704
626 return iwl_mvm_power_update_mac(mvm, vif); 705 return iwl_mvm_power_update_mac(mvm);
627} 706}
628 707
629static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) 708static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3d1d57f9f5bc..7091a18d5a72 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -417,7 +417,7 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
417 splx->package.count != 2 || 417 splx->package.count != 2 ||
418 splx->package.elements[0].type != ACPI_TYPE_INTEGER || 418 splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
419 splx->package.elements[0].integer.value != 0) { 419 splx->package.elements[0].integer.value != 0) {
420 IWL_ERR(trans, "Unsupported splx structure"); 420 IWL_ERR(trans, "Unsupported splx structure\n");
421 return 0; 421 return 0;
422 } 422 }
423 423
@@ -426,14 +426,14 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
426 limits->package.count < 2 || 426 limits->package.count < 2 ||
427 limits->package.elements[0].type != ACPI_TYPE_INTEGER || 427 limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
428 limits->package.elements[1].type != ACPI_TYPE_INTEGER) { 428 limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
429 IWL_ERR(trans, "Invalid limits element"); 429 IWL_ERR(trans, "Invalid limits element\n");
430 return 0; 430 return 0;
431 } 431 }
432 432
433 domain_type = &limits->package.elements[0]; 433 domain_type = &limits->package.elements[0];
434 power_limit = &limits->package.elements[1]; 434 power_limit = &limits->package.elements[1];
435 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { 435 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
436 IWL_DEBUG_INFO(trans, "WiFi power is not limited"); 436 IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
437 return 0; 437 return 0;
438 } 438 }
439 439
@@ -450,26 +450,26 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
450 pxsx_handle = ACPI_HANDLE(&pdev->dev); 450 pxsx_handle = ACPI_HANDLE(&pdev->dev);
451 if (!pxsx_handle) { 451 if (!pxsx_handle) {
452 IWL_DEBUG_INFO(trans, 452 IWL_DEBUG_INFO(trans,
453 "Could not retrieve root port ACPI handle"); 453 "Could not retrieve root port ACPI handle\n");
454 return; 454 return;
455 } 455 }
456 456
457 /* Get the method's handle */ 457 /* Get the method's handle */
458 status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); 458 status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
459 if (ACPI_FAILURE(status)) { 459 if (ACPI_FAILURE(status)) {
460 IWL_DEBUG_INFO(trans, "SPL method not found"); 460 IWL_DEBUG_INFO(trans, "SPL method not found\n");
461 return; 461 return;
462 } 462 }
463 463
464 /* Call SPLC with no arguments */ 464 /* Call SPLC with no arguments */
465 status = acpi_evaluate_object(handle, NULL, NULL, &splx); 465 status = acpi_evaluate_object(handle, NULL, NULL, &splx);
466 if (ACPI_FAILURE(status)) { 466 if (ACPI_FAILURE(status)) {
467 IWL_ERR(trans, "SPLC invocation failed (0x%x)", status); 467 IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
468 return; 468 return;
469 } 469 }
470 470
471 trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); 471 trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
472 IWL_DEBUG_INFO(trans, "Default power limit set to %lld", 472 IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
473 trans->dflt_pwr_limit); 473 trans->dflt_pwr_limit);
474 kfree(splx.pointer); 474 kfree(splx.pointer);
475} 475}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 9091513ea738..6c22b23a2845 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -102,7 +102,7 @@ struct iwl_rxq {
102 u32 write_actual; 102 u32 write_actual;
103 struct list_head rx_free; 103 struct list_head rx_free;
104 struct list_head rx_used; 104 struct list_head rx_used;
105 int need_update; 105 bool need_update;
106 struct iwl_rb_status *rb_stts; 106 struct iwl_rb_status *rb_stts;
107 dma_addr_t rb_stts_dma; 107 dma_addr_t rb_stts_dma;
108 spinlock_t lock; 108 spinlock_t lock;
@@ -117,21 +117,19 @@ struct iwl_dma_ptr {
117/** 117/**
118 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning 118 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
119 * @index -- current index 119 * @index -- current index
120 * @n_bd -- total number of entries in queue (must be power of 2)
121 */ 120 */
122static inline int iwl_queue_inc_wrap(int index, int n_bd) 121static inline int iwl_queue_inc_wrap(int index)
123{ 122{
124 return ++index & (n_bd - 1); 123 return ++index & (TFD_QUEUE_SIZE_MAX - 1);
125} 124}
126 125
127/** 126/**
128 * iwl_queue_dec_wrap - decrement queue index, wrap back to end 127 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
129 * @index -- current index 128 * @index -- current index
130 * @n_bd -- total number of entries in queue (must be power of 2)
131 */ 129 */
132static inline int iwl_queue_dec_wrap(int index, int n_bd) 130static inline int iwl_queue_dec_wrap(int index)
133{ 131{
134 return --index & (n_bd - 1); 132 return --index & (TFD_QUEUE_SIZE_MAX - 1);
135} 133}
136 134
137struct iwl_cmd_meta { 135struct iwl_cmd_meta {
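With the n_bd parameter gone, the wrap helpers above rely entirely on TFD_QUEUE_SIZE_MAX being a power of two: a bitwise AND with size - 1 implements the modulo, including the wrap from 0 back to 255 on decrement. A compilable illustration of exactly that math:

    #include <assert.h>

    #define TFD_QUEUE_SIZE_MAX 256

    static int queue_inc_wrap(int index)
    {
        return ++index & (TFD_QUEUE_SIZE_MAX - 1);
    }

    static int queue_dec_wrap(int index)
    {
        /* -1 & 255 == 255 on two's-complement machines, so 0 wraps back */
        return --index & (TFD_QUEUE_SIZE_MAX - 1);
    }

    int main(void)
    {
        assert(queue_inc_wrap(255) == 0);   /* wraps to the start */
        assert(queue_dec_wrap(0) == 255);   /* wraps to the end   */
        return 0;
    }

The BUILD_BUG_ON in tx.c (later in this diff) enforces the power-of-two requirement at compile time.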
@@ -145,13 +143,13 @@ struct iwl_cmd_meta {
145 * 143 *
146 * Contains common data for Rx and Tx queues. 144 * Contains common data for Rx and Tx queues.
147 * 145 *
148 * Note the difference between n_bd and n_window: the hardware 146 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
149 * always assumes 256 descriptors, so n_bd is always 256 (unless 147 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
150 * there might be HW changes in the future). For the normal TX 148 * there might be HW changes in the future). For the normal TX
151 * queues, n_window, which is the size of the software queue data 149 * queues, n_window, which is the size of the software queue data
152 * is also 256; however, for the command queue, n_window is only 150 * is also 256; however, for the command queue, n_window is only
153 * 32 since we don't need so many commands pending. Since the HW 151 * 32 since we don't need so many commands pending. Since the HW
154 * still uses 256 BDs for DMA though, n_bd stays 256. As a result, 152 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
155 * the software buffers (in the variables @meta, @txb in struct 153 * the software buffers (in the variables @meta, @txb in struct
156 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in 154 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
157 * the same struct) have 256. 155 * the same struct) have 256.
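The rewritten comment describes an overlay: the hardware ring always has TFD_QUEUE_SIZE_MAX slots, while the command queue keeps only n_window software entries, so a ring index is folded into the window with the same mask trick — this is what get_cmd_index relies on. A sketch assuming the 256/32 sizes from the comment:

    #include <assert.h>

    #define TFD_QUEUE_SIZE_MAX 256   /* hardware ring entries */
    #define CMD_QUEUE_WINDOW    32   /* software entries on the command queue */

    static int get_cmd_index(int ring_ptr)
    {
        return ring_ptr & (CMD_QUEUE_WINDOW - 1);
    }

    int main(void)
    {
        /* ring slots 8, 40, 72, ... all share software entry 8 */
        assert(get_cmd_index(8) == 8);
        assert(get_cmd_index(40) == 8);
        return 0;
    }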
@@ -162,7 +160,6 @@ struct iwl_cmd_meta {
162 * data is a window overlayed over the HW queue. 160 * data is a window overlayed over the HW queue.
163 */ 161 */
164struct iwl_queue { 162struct iwl_queue {
165 int n_bd; /* number of BDs in this queue */
166 int write_ptr; /* 1-st empty entry (index) host_w*/ 163 int write_ptr; /* 1-st empty entry (index) host_w*/
167 int read_ptr; /* last used entry (index) host_r*/ 164 int read_ptr; /* last used entry (index) host_r*/
168 /* use for monitoring and recovering the stuck queue */ 165 /* use for monitoring and recovering the stuck queue */
@@ -231,7 +228,7 @@ struct iwl_txq {
231 spinlock_t lock; 228 spinlock_t lock;
232 struct timer_list stuck_timer; 229 struct timer_list stuck_timer;
233 struct iwl_trans_pcie *trans_pcie; 230 struct iwl_trans_pcie *trans_pcie;
234 u8 need_update; 231 bool need_update;
235 u8 active; 232 u8 active;
236 bool ampdu; 233 bool ampdu;
237}; 234};
@@ -270,6 +267,9 @@ struct iwl_trans_pcie {
270 struct iwl_trans *trans; 267 struct iwl_trans *trans;
271 struct iwl_drv *drv; 268 struct iwl_drv *drv;
272 269
270 struct net_device napi_dev;
271 struct napi_struct napi;
272
273 /* INT ICT Table */ 273 /* INT ICT Table */
274 __le32 *ict_tbl; 274 __le32 *ict_tbl;
275 dma_addr_t ict_tbl_dma; 275 dma_addr_t ict_tbl_dma;
@@ -362,7 +362,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
362void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); 362void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
363int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 363int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
364 struct iwl_device_cmd *dev_cmd, int txq_id); 364 struct iwl_device_cmd *dev_cmd, int txq_id);
365void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); 365void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
366int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 366int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
367void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 367void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
368 struct iwl_rx_cmd_buffer *rxb, int handler_status); 368 struct iwl_rx_cmd_buffer *rxb, int handler_status);
@@ -370,6 +370,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
370 struct sk_buff_head *skbs); 370 struct sk_buff_head *skbs);
371void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); 371void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
372 372
373static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
374{
375 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
376
377 return le16_to_cpu(tb->hi_n_len) >> 4;
378}
379
373/***************************************************** 380/*****************************************************
374* Error handling 381* Error handling
375******************************************************/ 382******************************************************/
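iwl_pcie_tfd_tb_get_len, moved into this header so trans.c can size dumped commands, unpacks a 16-bit hi_n_len field: the upper 12 bits are the buffer length and the low nibble extends the 32-bit DMA address (its counterpart iwl_pcie_tfd_tb_get_addr consumes those bits). A tiny standalone decode of the length half:

    #include <assert.h>
    #include <stdint.h>

    /* assumes hi_n_len has already been converted from little endian */
    static uint16_t tb_get_len(uint16_t hi_n_len)
    {
        return hi_n_len >> 4;  /* drop the 4 address-high bits */
    }

    int main(void)
    {
        /* 0x1234 -> length 0x123, address-extension nibble 0x4 */
        assert(tb_get_len(0x1234) == 0x123);
        return 0;
    }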
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fdfa3969cac9..a2698e5e062c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
145/* 145/*
146 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 146 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
147 */ 147 */
148static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 148static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
149 struct iwl_rxq *rxq)
150{ 149{
150 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
151 struct iwl_rxq *rxq = &trans_pcie->rxq;
151 u32 reg; 152 u32 reg;
152 153
153 spin_lock(&rxq->lock); 154 lockdep_assert_held(&rxq->lock);
154
155 if (rxq->need_update == 0)
156 goto exit_unlock;
157 155
158 /* 156 /*
159 * explicitly wake up the NIC if: 157 * explicitly wake up the NIC if:
@@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
169 reg); 167 reg);
170 iwl_set_bit(trans, CSR_GP_CNTRL, 168 iwl_set_bit(trans, CSR_GP_CNTRL,
171 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 169 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
172 goto exit_unlock; 170 rxq->need_update = true;
171 return;
173 } 172 }
174 } 173 }
175 174
176 rxq->write_actual = round_down(rxq->write, 8); 175 rxq->write_actual = round_down(rxq->write, 8);
177 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 176 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
178 rxq->need_update = 0; 177}
178
179static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
180{
181 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
182 struct iwl_rxq *rxq = &trans_pcie->rxq;
183
184 spin_lock(&rxq->lock);
185
186 if (!rxq->need_update)
187 goto exit_unlock;
188
189 iwl_pcie_rxq_inc_wr_ptr(trans);
190 rxq->need_update = false;
179 191
180 exit_unlock: 192 exit_unlock:
181 spin_unlock(&rxq->lock); 193 spin_unlock(&rxq->lock);
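The rx.c rework above separates the doorbell write from the decision to ring it: if the NIC is asleep, the update is recorded in need_update and replayed from iwl_pcie_rxq_check_wrptr on the wakeup interrupt, always under the queue lock. A simplified single-threaded model of that deferral (the hardware and locking are stubbed out):

    #include <stdbool.h>
    #include <stdio.h>

    struct rxq { int write; int write_actual; bool need_update; };

    static bool nic_awake;

    static void write_doorbell(int v)
    {
        printf("doorbell <- %d\n", v);   /* an MMIO write in the driver */
    }

    static void rxq_inc_wr_ptr(struct rxq *rxq)
    {
        if (!nic_awake) {
            rxq->need_update = true;     /* defer to the wakeup interrupt */
            return;
        }
        rxq->write_actual = rxq->write & ~0x7;  /* device wants multiples of 8 */
        write_doorbell(rxq->write_actual);
    }

    static void rxq_check_wrptr(struct rxq *rxq)
    {
        if (!rxq->need_update)
            return;
        rxq_inc_wr_ptr(rxq);
        rxq->need_update = false;
    }

    int main(void)
    {
        struct rxq rxq = { .write = 42 };

        rxq_inc_wr_ptr(&rxq);   /* NIC asleep: only marks need_update */
        nic_awake = true;
        rxq_check_wrptr(&rxq);  /* wakeup path flushes the pointer: 40 */
        return 0;
    }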
@@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
236 * Increment device's write pointer in multiples of 8. */ 248 * Increment device's write pointer in multiples of 8. */
237 if (rxq->write_actual != (rxq->write & ~0x7)) { 249 if (rxq->write_actual != (rxq->write & ~0x7)) {
238 spin_lock(&rxq->lock); 250 spin_lock(&rxq->lock);
239 rxq->need_update = 1; 251 iwl_pcie_rxq_inc_wr_ptr(trans);
240 spin_unlock(&rxq->lock); 252 spin_unlock(&rxq->lock);
241 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
242 } 253 }
243} 254}
244 255
@@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
362 * Also restock the Rx queue via iwl_pcie_rxq_restock. 373 * Also restock the Rx queue via iwl_pcie_rxq_restock.
363 * This is called as a scheduled work item (except for during initialization) 374 * This is called as a scheduled work item (except for during initialization)
364 */ 375 */
365static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 376static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
366{
367 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
368
369 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
370
371 spin_lock(&trans_pcie->irq_lock);
372 iwl_pcie_rxq_restock(trans);
373 spin_unlock(&trans_pcie->irq_lock);
374}
375
376static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
377{ 377{
378 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); 378 iwl_pcie_rxq_alloc_rbs(trans, gfp);
379 379
380 iwl_pcie_rxq_restock(trans); 380 iwl_pcie_rxq_restock(trans);
381} 381}
@@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
385 struct iwl_trans_pcie *trans_pcie = 385 struct iwl_trans_pcie *trans_pcie =
386 container_of(data, struct iwl_trans_pcie, rx_replenish); 386 container_of(data, struct iwl_trans_pcie, rx_replenish);
387 387
388 iwl_pcie_rx_replenish(trans_pcie->trans); 388 iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
389} 389}
390 390
391static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 391static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
521 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 521 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
522 spin_unlock(&rxq->lock); 522 spin_unlock(&rxq->lock);
523 523
524 iwl_pcie_rx_replenish(trans); 524 iwl_pcie_rx_replenish(trans, GFP_KERNEL);
525 525
526 iwl_pcie_rx_hw_init(trans, rxq); 526 iwl_pcie_rx_hw_init(trans, rxq);
527 527
528 spin_lock(&trans_pcie->irq_lock); 528 spin_lock(&rxq->lock);
529 rxq->need_update = 1; 529 iwl_pcie_rxq_inc_wr_ptr(trans);
530 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 530 spin_unlock(&rxq->lock);
531 spin_unlock(&trans_pcie->irq_lock);
532 531
533 return 0; 532 return 0;
534} 533}
@@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
673 /* Reuse the page if possible. For notification packets and 672 /* Reuse the page if possible. For notification packets and
674 * SKBs that fail to Rx correctly, add them back into the 673 * SKBs that fail to Rx correctly, add them back into the
675 * rx_free list for reuse later. */ 674 * rx_free list for reuse later. */
676 spin_lock(&rxq->lock);
677 if (rxb->page != NULL) { 675 if (rxb->page != NULL) {
678 rxb->page_dma = 676 rxb->page_dma =
679 dma_map_page(trans->dev, rxb->page, 0, 677 dma_map_page(trans->dev, rxb->page, 0,
@@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
694 } 692 }
695 } else 693 } else
696 list_add_tail(&rxb->list, &rxq->rx_used); 694 list_add_tail(&rxb->list, &rxq->rx_used);
697 spin_unlock(&rxq->lock);
698} 695}
699 696
700/* 697/*
@@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
709 u32 count = 8; 706 u32 count = 8;
710 int total_empty; 707 int total_empty;
711 708
709restart:
710 spin_lock(&rxq->lock);
712 /* uCode's read index (stored in shared DRAM) indicates the last Rx 711 /* uCode's read index (stored in shared DRAM) indicates the last Rx
713 * buffer that the driver may process (last buffer filled by ucode). */ 712 * buffer that the driver may process (last buffer filled by ucode). */
714 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 713 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
743 count++; 742 count++;
744 if (count >= 8) { 743 if (count >= 8) {
745 rxq->read = i; 744 rxq->read = i;
746 iwl_pcie_rx_replenish_now(trans); 745 spin_unlock(&rxq->lock);
746 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
747 count = 0; 747 count = 0;
748 goto restart;
748 } 749 }
749 } 750 }
750 } 751 }
751 752
752 /* Backtrack one entry */ 753 /* Backtrack one entry */
753 rxq->read = i; 754 rxq->read = i;
755 spin_unlock(&rxq->lock);
756
754 if (fill_rx) 757 if (fill_rx)
755 iwl_pcie_rx_replenish_now(trans); 758 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
756 else 759 else
757 iwl_pcie_rxq_restock(trans); 760 iwl_pcie_rxq_restock(trans);
761
762 if (trans_pcie->napi.poll)
763 napi_gro_flush(&trans_pcie->napi, false);
758} 764}
759 765
760/* 766/*
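The restart label introduced above exists because buffer replenishment must not run under the queue lock: after a batch of eight buffers the lock is dropped, RBs are refilled with atomic allocations, and the loop re-reads the hardware index from the top. A rough pthread-based model of that shape (the driver uses a spinlock and real buffer accounting):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rxq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int hw_index = 20, sw_index;

    static void replenish_atomic(void)
    {
        /* would allocate fresh RBs (GFP_ATOMIC-style) and restock */
    }

    static void rx_handle(void)
    {
        int count = 0;
    restart:
        pthread_mutex_lock(&rxq_lock);
        while (sw_index != hw_index) {
            sw_index++;                    /* process one buffer */
            if (++count >= 8) {
                pthread_mutex_unlock(&rxq_lock);
                replenish_atomic();        /* never while holding the lock */
                count = 0;
                goto restart;              /* hw_index may have moved on */
            }
        }
        pthread_mutex_unlock(&rxq_lock);
    }

    int main(void)
    {
        rx_handle();
        printf("processed up to %d\n", sw_index);
        return 0;
    }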
@@ -844,7 +850,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
844 trans_pcie->ict_index, read); 850 trans_pcie->ict_index, read);
845 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; 851 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
846 trans_pcie->ict_index = 852 trans_pcie->ict_index =
847 iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT); 853 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
848 854
849 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 855 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
850 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, 856 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
@@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
876 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 882 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
877 u32 inta = 0; 883 u32 inta = 0;
878 u32 handled = 0; 884 u32 handled = 0;
879 u32 i;
880 885
881 lock_map_acquire(&trans->sync_cmd_lockdep_map); 886 lock_map_acquire(&trans->sync_cmd_lockdep_map);
882 887
@@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1028 /* uCode wakes up after power-down sleep */ 1033 /* uCode wakes up after power-down sleep */
1029 if (inta & CSR_INT_BIT_WAKEUP) { 1034 if (inta & CSR_INT_BIT_WAKEUP) {
1030 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1035 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1031 iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq); 1036 iwl_pcie_rxq_check_wrptr(trans);
1032 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) 1037 iwl_pcie_txq_check_wrptrs(trans);
1033 iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
1034 1038
1035 isr_stats->wakeup++; 1039 isr_stats->wakeup++;
1036 1040
@@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1068 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1072 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1069 CSR_INT_PERIODIC_DIS); 1073 CSR_INT_PERIODIC_DIS);
1070 1074
1071 iwl_pcie_rx_handle(trans);
1072
1073 /* 1075 /*
1074 * Enable periodic interrupt in 8 msec only if we received 1076 * Enable periodic interrupt in 8 msec only if we received
1075 * real RX interrupt (instead of just periodic int), to catch 1077 * real RX interrupt (instead of just periodic int), to catch
@@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1082 CSR_INT_PERIODIC_ENA); 1084 CSR_INT_PERIODIC_ENA);
1083 1085
1084 isr_stats->rx++; 1086 isr_stats->rx++;
1087
1088 local_bh_disable();
1089 iwl_pcie_rx_handle(trans);
1090 local_bh_enable();
1085 } 1091 }
1086 1092
1087 /* This "Tx" DMA channel is used only for loading uCode */ 1093 /* This "Tx" DMA channel is used only for loading uCode */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 2365553f1ef7..788085bc65d7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -73,6 +73,7 @@
73#include "iwl-csr.h" 73#include "iwl-csr.h"
74#include "iwl-prph.h" 74#include "iwl-prph.h"
75#include "iwl-agn-hw.h" 75#include "iwl-agn-hw.h"
76#include "iwl-fw-error-dump.h"
76#include "internal.h" 77#include "internal.h"
77 78
78static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) 79static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
@@ -103,7 +104,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
103 104
104/* PCI registers */ 105/* PCI registers */
105#define PCI_CFG_RETRY_TIMEOUT 0x041 106#define PCI_CFG_RETRY_TIMEOUT 0x041
106#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
107 107
108static void iwl_pcie_apm_config(struct iwl_trans *trans) 108static void iwl_pcie_apm_config(struct iwl_trans *trans)
109{ 109{
@@ -454,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
454{ 454{
455 int ret; 455 int ret;
456 int t = 0; 456 int t = 0;
457 int iter;
457 458
458 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 459 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
459 460
@@ -462,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
462 if (ret >= 0) 463 if (ret >= 0)
463 return 0; 464 return 0;
464 465
465 /* If HW is not ready, prepare the conditions to check again */ 466 for (iter = 0; iter < 10; iter++) {
466 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 467 /* If HW is not ready, prepare the conditions to check again */
467 CSR_HW_IF_CONFIG_REG_PREPARE); 468 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
469 CSR_HW_IF_CONFIG_REG_PREPARE);
470
471 do {
472 ret = iwl_pcie_set_hw_ready(trans);
473 if (ret >= 0)
474 return 0;
468 475
469 do { 476 usleep_range(200, 1000);
470 ret = iwl_pcie_set_hw_ready(trans); 477 t += 200;
471 if (ret >= 0) 478 } while (t < 150000);
472 return 0; 479 msleep(25);
480 }
473 481
474 usleep_range(200, 1000); 482 IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
475 t += 200;
476 } while (t < 150000);
477 483
478 return ret; 484 return ret;
479} 485}
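The prepare-card change wraps the old single poll in up to ten attempts, re-asserting PREPARE and sleeping 25 ms between them. Note that in the hunk t carries over between attempts rather than being reset, so only the first attempt polls for the full 150 ms; the sketch below mirrors that, with the readiness check stubbed to always fail:

    #include <stdio.h>
    #include <unistd.h>

    static int set_hw_ready(void)
    {
        return -1;  /* would poll a CSR readiness bit; never ready here */
    }

    static int prepare_card_hw(void)
    {
        int ret = set_hw_ready();
        int t = 0, iter;

        if (ret >= 0)
            return 0;

        for (iter = 0; iter < 10; iter++) {
            /* would assert the PREPARE bit here */
            do {
                ret = set_hw_ready();
                if (ret >= 0)
                    return 0;
                usleep(200);
                t += 200;
            } while (t < 150000);   /* t is shared across attempts,
                                       exactly as in the hunk above */
            usleep(25 * 1000);
        }
        printf("gave up after %d attempts\n", iter);
        return ret;
    }

    int main(void)
    {
        return prepare_card_hw() < 0;
    }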
@@ -1053,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1053 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1059 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1054} 1060}
1055 1061
1062static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
1063{
1064 WARN_ON(1);
1065 return 0;
1066}
1067
1056static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1068static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1057 const struct iwl_trans_config *trans_cfg) 1069 const struct iwl_trans_config *trans_cfg)
1058{ 1070{
@@ -1079,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1079 1091
1080 trans_pcie->command_names = trans_cfg->command_names; 1092 trans_pcie->command_names = trans_cfg->command_names;
1081 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; 1093 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
1094
1095 /* Initialize NAPI here - it should be before registering to mac80211
1096 * in the opmode but after the HW struct is allocated.
1097 * As this function may be called again in some corner cases don't
1098 * do anything if NAPI was already initialized.
1099 */
1100 if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
1101 init_dummy_netdev(&trans_pcie->napi_dev);
1102 iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
1103 &trans_pcie->napi_dev,
1104 iwl_pcie_dummy_napi_poll, 64);
1105 }
1082} 1106}
1083 1107
1084void iwl_trans_pcie_free(struct iwl_trans *trans) 1108void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1099,6 +1123,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1099 pci_disable_device(trans_pcie->pci_dev); 1123 pci_disable_device(trans_pcie->pci_dev);
1100 kmem_cache_destroy(trans->dev_cmd_pool); 1124 kmem_cache_destroy(trans->dev_cmd_pool);
1101 1125
1126 if (trans_pcie->napi.poll)
1127 netif_napi_del(&trans_pcie->napi);
1128
1102 kfree(trans); 1129 kfree(trans);
1103} 1130}
1104 1131
@@ -1237,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
1237 1264
1238#define IWL_FLUSH_WAIT_MS 2000 1265#define IWL_FLUSH_WAIT_MS 2000
1239 1266
1240static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) 1267static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
1241{ 1268{
1242 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1269 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1243 struct iwl_txq *txq; 1270 struct iwl_txq *txq;
@@ -1250,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
1250 1277
1251 /* waiting for all the tx frames complete might take a while */ 1278 /* waiting for all the tx frames complete might take a while */
1252 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { 1279 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1280 u8 wr_ptr;
1281
1253 if (cnt == trans_pcie->cmd_queue) 1282 if (cnt == trans_pcie->cmd_queue)
1254 continue; 1283 continue;
1284 if (!test_bit(cnt, trans_pcie->queue_used))
1285 continue;
1286 if (!(BIT(cnt) & txq_bm))
1287 continue;
1288
1289 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
1255 txq = &trans_pcie->txq[cnt]; 1290 txq = &trans_pcie->txq[cnt];
1256 q = &txq->q; 1291 q = &txq->q;
1257 while (q->read_ptr != q->write_ptr && !time_after(jiffies, 1292 wr_ptr = ACCESS_ONCE(q->write_ptr);
1258 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) 1293
1294 while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
1295 !time_after(jiffies,
1296 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
1297 u8 write_ptr = ACCESS_ONCE(q->write_ptr);
1298
1299 if (WARN_ONCE(wr_ptr != write_ptr,
1300 "WR pointer moved while flushing %d -> %d\n",
1301 wr_ptr, write_ptr))
1302 return -ETIMEDOUT;
1259 msleep(1); 1303 msleep(1);
1304 }
1260 1305
1261 if (q->read_ptr != q->write_ptr) { 1306 if (q->read_ptr != q->write_ptr) {
1262 IWL_ERR(trans, 1307 IWL_ERR(trans,
@@ -1264,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
1264 ret = -ETIMEDOUT; 1309 ret = -ETIMEDOUT;
1265 break; 1310 break;
1266 } 1311 }
1312 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
1267 } 1313 }
1268 1314
1269 if (!ret) 1315 if (!ret)
@@ -1298,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
1298 IWL_ERR(trans, 1344 IWL_ERR(trans,
1299 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", 1345 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
1300 cnt, active ? "" : "in", fifo, tbl_dw, 1346 cnt, active ? "" : "in", fifo, tbl_dw,
1301 iwl_read_prph(trans, 1347 iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
1302 SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1), 1348 (TFD_QUEUE_SIZE_MAX - 1),
1303 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); 1349 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
1304 } 1350 }
1305 1351
@@ -1630,6 +1676,61 @@ err:
1630 IWL_ERR(trans, "failed to create the trans debugfs entry\n"); 1676 IWL_ERR(trans, "failed to create the trans debugfs entry\n");
1631 return -ENOMEM; 1677 return -ENOMEM;
1632} 1678}
1679
1680static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
1681{
1682 u32 cmdlen = 0;
1683 int i;
1684
1685 for (i = 0; i < IWL_NUM_OF_TBS; i++)
1686 cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
1687
1688 return cmdlen;
1689}
1690
1691static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
1692 void *buf, u32 buflen)
1693{
1694 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1695 struct iwl_fw_error_dump_data *data;
1696 struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
1697 struct iwl_fw_error_dump_txcmd *txcmd;
1698 u32 len;
1699 int i, ptr;
1700
1701 if (!buf)
1702 return sizeof(*data) +
1703 cmdq->q.n_window * (sizeof(*txcmd) +
1704 TFD_MAX_PAYLOAD_SIZE);
1705
1706 len = 0;
1707 data = buf;
1708 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
1709 txcmd = (void *)data->data;
1710 spin_lock_bh(&cmdq->lock);
1711 ptr = cmdq->q.write_ptr;
1712 for (i = 0; i < cmdq->q.n_window; i++) {
1713 u8 idx = get_cmd_index(&cmdq->q, ptr);
1714 u32 caplen, cmdlen;
1715
1716 cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
1717 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
1718
1719 if (cmdlen) {
1720 len += sizeof(*txcmd) + caplen;
1721 txcmd->cmdlen = cpu_to_le32(cmdlen);
1722 txcmd->caplen = cpu_to_le32(caplen);
1723 memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
1724 txcmd = (void *)((u8 *)txcmd->data + caplen);
1725 }
1726
1727 ptr = iwl_queue_dec_wrap(ptr);
1728 }
1729 spin_unlock_bh(&cmdq->lock);
1730
1731 data->len = cpu_to_le32(len);
1732 return sizeof(*data) + len;
1733}
1633#else 1734#else
1634static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 1735static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1635 struct dentry *dir) 1736 struct dentry *dir)
@@ -1672,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {
1672 .grab_nic_access = iwl_trans_pcie_grab_nic_access, 1773 .grab_nic_access = iwl_trans_pcie_grab_nic_access,
1673 .release_nic_access = iwl_trans_pcie_release_nic_access, 1774 .release_nic_access = iwl_trans_pcie_release_nic_access,
1674 .set_bits_mask = iwl_trans_pcie_set_bits_mask, 1775 .set_bits_mask = iwl_trans_pcie_set_bits_mask,
1776
1777#ifdef CONFIG_IWLWIFI_DEBUGFS
1778 .dump_data = iwl_trans_pcie_dump_data,
1779#endif
1675}; 1780};
1676 1781
1677struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 1782struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
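iwl_trans_pcie_dump_data above follows a two-pass convention: called with buf == NULL it returns the worst-case size so the caller can allocate, then a second call fills the records. A minimal self-contained version of that contract, with a simplified record layout (the real iwl_fw_error_dump_* structures carry more fields):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct dump_hdr { uint32_t type, len; uint8_t data[]; };

    static uint32_t dump_data(void *buf, const uint8_t *src, uint32_t src_len)
    {
        struct dump_hdr *hdr;

        if (!buf)   /* sizing pass: report the space we would need */
            return sizeof(struct dump_hdr) + src_len;

        hdr = buf;
        hdr->type = 1;              /* e.g. a TXCMD-style record */
        hdr->len = src_len;
        memcpy(hdr->data, src, src_len);
        return sizeof(*hdr) + src_len;
    }

    int main(void)
    {
        const uint8_t payload[16] = { 0 };
        uint32_t len = dump_data(NULL, payload, sizeof(payload));
        void *buf = malloc(len);

        if (!buf)
            return 1;
        dump_data(buf, payload, sizeof(payload));
        free(buf);
        return 0;
    }

Keeping both passes in one function means the size computation and the fill logic can never drift apart.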
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3b0c72c10054..038940afbdc5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -70,20 +70,20 @@ static int iwl_queue_space(const struct iwl_queue *q)
70 70
71 /* 71 /*
72 * To avoid ambiguity between empty and completely full queues, there 72 * To avoid ambiguity between empty and completely full queues, there
73 * should always be less than q->n_bd elements in the queue. 73 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
74 * If q->n_window is smaller than q->n_bd, there is no need to reserve 74 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
75 * any queue entries for this purpose. 75 * to reserve any queue entries for this purpose.
76 */ 76 */
77 if (q->n_window < q->n_bd) 77 if (q->n_window < TFD_QUEUE_SIZE_MAX)
78 max = q->n_window; 78 max = q->n_window;
79 else 79 else
80 max = q->n_bd - 1; 80 max = TFD_QUEUE_SIZE_MAX - 1;
81 81
82 /* 82 /*
83 * q->n_bd is a power of 2, so the following is equivalent to modulo by 83 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
84 * q->n_bd and is well defined for negative dividends. 84 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
85 */ 85 */
86 used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1); 86 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
87 87
88 if (WARN_ON(used > max)) 88 if (WARN_ON(used > max))
89 return 0; 89 return 0;
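The iwl_queue_space rewrite leans on the same power-of-two property: (write - read) & (size - 1) yields the occupancy even after write has wrapped past read, because the mask performs the modulo on the negative difference; one slot stays reserved so a full ring is never mistaken for an empty one. A compilable check:

    #include <assert.h>

    #define TFD_QUEUE_SIZE_MAX 256

    static int queue_used(int write_ptr, int read_ptr)
    {
        /* well defined for "negative" differences thanks to the mask */
        return (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
    }

    int main(void)
    {
        assert(queue_used(10, 250) == 16);   /* write wrapped past the end */
        assert(queue_used(250, 10) == 240);
        return 0;
    }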
@@ -94,17 +94,11 @@ static int iwl_queue_space(const struct iwl_queue *q)
94/* 94/*
95 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 95 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
96 */ 96 */
97static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) 97static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
98{ 98{
99 q->n_bd = count;
100 q->n_window = slots_num; 99 q->n_window = slots_num;
101 q->id = id; 100 q->id = id;
102 101
103 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
104 * and iwl_queue_dec_wrap are broken. */
105 if (WARN_ON(!is_power_of_2(count)))
106 return -EINVAL;
107
108 /* slots_num must be power-of-two size, otherwise 102 /* slots_num must be power-of-two size, otherwise
109 * get_cmd_index is broken. */ 103 * get_cmd_index is broken. */
110 if (WARN_ON(!is_power_of_2(slots_num))) 104 if (WARN_ON(!is_power_of_2(slots_num)))
@@ -197,17 +191,17 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
197 IWL_ERR(trans, 191 IWL_ERR(trans,
198 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", 192 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
199 i, active ? "" : "in", fifo, tbl_dw, 193 i, active ? "" : "in", fifo, tbl_dw,
200 iwl_read_prph(trans, 194 iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
201 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), 195 (TFD_QUEUE_SIZE_MAX - 1),
202 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); 196 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
203 } 197 }
204 198
205 for (i = q->read_ptr; i != q->write_ptr; 199 for (i = q->read_ptr; i != q->write_ptr;
206 i = iwl_queue_inc_wrap(i, q->n_bd)) 200 i = iwl_queue_inc_wrap(i))
207 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 201 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
208 le32_to_cpu(txq->scratchbufs[i].scratch)); 202 le32_to_cpu(txq->scratchbufs[i].scratch));
209 203
210 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1); 204 iwl_force_nmi(trans);
211} 205}
212 206
213/* 207/*
@@ -287,14 +281,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
287/* 281/*
288 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 282 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
289 */ 283 */
290void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) 284static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
285 struct iwl_txq *txq)
291{ 286{
292 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 287 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
293 u32 reg = 0; 288 u32 reg = 0;
294 int txq_id = txq->q.id; 289 int txq_id = txq->q.id;
295 290
296 if (txq->need_update == 0) 291 lockdep_assert_held(&txq->lock);
297 return;
298 292
299 /* 293 /*
300 * explicitly wake up the NIC if: 294 * explicitly wake up the NIC if:
@@ -317,6 +311,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
317 txq_id, reg); 311 txq_id, reg);
318 iwl_set_bit(trans, CSR_GP_CNTRL, 312 iwl_set_bit(trans, CSR_GP_CNTRL,
319 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 313 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
314 txq->need_update = true;
320 return; 315 return;
321 } 316 }
322 } 317 }
@@ -327,8 +322,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
327 */ 322 */
328 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); 323 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
329 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 324 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
325}
326
327void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
328{
329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
330 int i;
331
332 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
333 struct iwl_txq *txq = &trans_pcie->txq[i];
330 334
331 txq->need_update = 0; 335 spin_lock_bh(&txq->lock);
336 if (trans_pcie->txq[i].need_update) {
337 iwl_pcie_txq_inc_wr_ptr(trans, txq);
338 trans_pcie->txq[i].need_update = false;
339 }
340 spin_unlock_bh(&txq->lock);
341 }
332} 342}
333 343
334static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) 344static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -343,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
343 return addr; 353 return addr;
344} 354}
345 355
346static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
347{
348 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
349
350 return le16_to_cpu(tb->hi_n_len) >> 4;
351}
352
353static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, 356static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
354 dma_addr_t addr, u16 len) 357 dma_addr_t addr, u16 len)
355{ 358{
@@ -409,13 +412,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
409{ 412{
410 struct iwl_tfd *tfd_tmp = txq->tfds; 413 struct iwl_tfd *tfd_tmp = txq->tfds;
411 414
412 /* rd_ptr is bounded by n_bd and idx is bounded by n_window */ 415 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
416 * idx is bounded by n_window
417 */
413 int rd_ptr = txq->q.read_ptr; 418 int rd_ptr = txq->q.read_ptr;
414 int idx = get_cmd_index(&txq->q, rd_ptr); 419 int idx = get_cmd_index(&txq->q, rd_ptr);
415 420
416 lockdep_assert_held(&txq->lock); 421 lockdep_assert_held(&txq->lock);
417 422
418 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 423 /* We have only q->n_window txq->entries, but we use
424 * TFD_QUEUE_SIZE_MAX tfds
425 */
419 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); 426 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
420 427
421 /* free SKB */ 428 /* free SKB */
@@ -436,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
436} 443}
437 444
438static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 445static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
439 dma_addr_t addr, u16 len, u8 reset) 446 dma_addr_t addr, u16 len, bool reset)
440{ 447{
441 struct iwl_queue *q; 448 struct iwl_queue *q;
442 struct iwl_tfd *tfd, *tfd_tmp; 449 struct iwl_tfd *tfd, *tfd_tmp;
@@ -542,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
542{ 549{
543 int ret; 550 int ret;
544 551
545 txq->need_update = 0; 552 txq->need_update = false;
546 553
547 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 554 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
548 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 555 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
549 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 556 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
550 557
551 /* Initialize queue's high/low-water marks, and head/tail indexes */ 558 /* Initialize queue's high/low-water marks, and head/tail indexes */
552 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, 559 ret = iwl_queue_init(&txq->q, slots_num, txq_id);
553 txq_id);
554 if (ret) 560 if (ret)
555 return ret; 561 return ret;
556 562
@@ -575,15 +581,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
575 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 581 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
576 struct iwl_queue *q = &txq->q; 582 struct iwl_queue *q = &txq->q;
577 583
578 if (!q->n_bd)
579 return;
580
581 spin_lock_bh(&txq->lock); 584 spin_lock_bh(&txq->lock);
582 while (q->write_ptr != q->read_ptr) { 585 while (q->write_ptr != q->read_ptr) {
583 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 586 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
584 txq_id, q->read_ptr); 587 txq_id, q->read_ptr);
585 iwl_pcie_txq_free_tfd(trans, txq); 588 iwl_pcie_txq_free_tfd(trans, txq);
586 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 589 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
587 } 590 }
588 txq->active = false; 591 txq->active = false;
589 spin_unlock_bh(&txq->lock); 592 spin_unlock_bh(&txq->lock);
@@ -620,10 +623,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
620 } 623 }
621 624
622 /* De-alloc circular buffer of TFDs */ 625 /* De-alloc circular buffer of TFDs */
623 if (txq->q.n_bd) { 626 if (txq->tfds) {
624 dma_free_coherent(dev, sizeof(struct iwl_tfd) * 627 dma_free_coherent(dev,
625 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 628 sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
629 txq->tfds, txq->q.dma_addr);
626 txq->q.dma_addr = 0; 630 txq->q.dma_addr = 0;
631 txq->tfds = NULL;
627 632
628 dma_free_coherent(dev, 633 dma_free_coherent(dev,
629 sizeof(*txq->scratchbufs) * txq->q.n_window, 634 sizeof(*txq->scratchbufs) * txq->q.n_window,
@@ -680,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
680 /* The chain extension of the SCD doesn't work well. This feature is 685 /* The chain extension of the SCD doesn't work well. This feature is
681 * enabled by default by the HW, so we need to disable it manually. 686 * enabled by default by the HW, so we need to disable it manually.
682 */ 687 */
683 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 688 if (trans->cfg->base_params->scd_chain_ext_wa)
689 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
684 690
685 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 691 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
686 trans_pcie->cmd_fifo); 692 trans_pcie->cmd_fifo);
@@ -931,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
931{ 937{
932 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 938 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
933 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 939 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
934 /* n_bd is usually 256 => n_bd - 1 = 0xff */ 940 int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
935 int tfd_num = ssn & (txq->q.n_bd - 1);
936 struct iwl_queue *q = &txq->q; 941 struct iwl_queue *q = &txq->q;
937 int last_to_free; 942 int last_to_free;
938 943
@@ -956,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
956 961
957 /*Since we free until index _not_ inclusive, the one before index is 962 /*Since we free until index _not_ inclusive, the one before index is
958 * the last we will free. This one must be used */ 963 * the last we will free. This one must be used */
959 last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd); 964 last_to_free = iwl_queue_dec_wrap(tfd_num);
960 965
961 if (!iwl_queue_used(q, last_to_free)) { 966 if (!iwl_queue_used(q, last_to_free)) {
962 IWL_ERR(trans, 967 IWL_ERR(trans,
963 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 968 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
964 __func__, txq_id, last_to_free, q->n_bd, 969 __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
965 q->write_ptr, q->read_ptr); 970 q->write_ptr, q->read_ptr);
966 goto out; 971 goto out;
967 } 972 }
@@ -971,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
971 976
972 for (; 977 for (;
973 q->read_ptr != tfd_num; 978 q->read_ptr != tfd_num;
974 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 979 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
975 980
976 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) 981 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
977 continue; 982 continue;
@@ -1010,25 +1015,26 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1010 1015
1011 lockdep_assert_held(&txq->lock); 1016 lockdep_assert_held(&txq->lock);
1012 1017
1013 if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) { 1018 if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
1014 IWL_ERR(trans, 1019 IWL_ERR(trans,
1015 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1020 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1016 __func__, txq_id, idx, q->n_bd, 1021 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
1017 q->write_ptr, q->read_ptr); 1022 q->write_ptr, q->read_ptr);
1018 return; 1023 return;
1019 } 1024 }
1020 1025
1021 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 1026 for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
1022 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1027 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
1023 1028
1024 if (nfreed++ > 0) { 1029 if (nfreed++ > 0) {
1025 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1030 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1026 idx, q->write_ptr, q->read_ptr); 1031 idx, q->write_ptr, q->read_ptr);
1027 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1); 1032 iwl_force_nmi(trans);
1028 } 1033 }
1029 } 1034 }
1030 1035
1031 if (q->read_ptr == q->write_ptr) { 1036 if (trans->cfg->base_params->apmg_wake_up_wa &&
1037 q->read_ptr == q->write_ptr) {
1032 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1038 spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1033 WARN_ON(!trans_pcie->cmd_in_flight); 1039 WARN_ON(!trans_pcie->cmd_in_flight);
1034 trans_pcie->cmd_in_flight = false; 1040 trans_pcie->cmd_in_flight = false;
@@ -1309,28 +1315,39 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1309 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1315 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1310 copy_size = sizeof(out_cmd->hdr); 1316 copy_size = sizeof(out_cmd->hdr);
1311 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1317 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1312 int copy = 0; 1318 int copy;
1313 1319
1314 if (!cmd->len[i]) 1320 if (!cmd->len[i])
1315 continue; 1321 continue;
1316 1322
1317 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1318 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1319 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1320
1321 if (copy > cmd->len[i])
1322 copy = cmd->len[i];
1323 }
1324
1325 /* copy everything if not nocopy/dup */ 1323 /* copy everything if not nocopy/dup */
1326 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1324 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1327 IWL_HCMD_DFL_DUP))) 1325 IWL_HCMD_DFL_DUP))) {
1328 copy = cmd->len[i]; 1326 copy = cmd->len[i];
1329 1327
1330 if (copy) {
1331 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1328 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1332 cmd_pos += copy; 1329 cmd_pos += copy;
1333 copy_size += copy; 1330 copy_size += copy;
1331 continue;
1332 }
1333
1334 /*
1335 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
1336 * in total (for the scratchbuf handling), but copy up to what
1337 * we can fit into the payload for debug dump purposes.
1338 */
1339 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1340
1341 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1342 cmd_pos += copy;
1343
 1344 /* However, treat copy_size the proper way; we need it below */
1345 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1346 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1347
1348 if (copy > cmd->len[i])
1349 copy = cmd->len[i];
1350 copy_size += copy;
1334 } 1351 }
1335 } 1352 }
1336 1353
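A worked example of the reshuffled copy rule above, with an assumed IWL_HCMD_SCRATCHBUF_SIZE of 16: two nocopy fragments of 10 and 40 bytes are both mirrored into the payload for debug-dump purposes, but copy_size only counts the bytes that must land in the scratch buffer. A self-contained sketch (payload-size cap omitted for brevity):

	static void hcmd_copy_rule_example(void)
	{
		const int scratch_min = 16;	/* stand-in for IWL_HCMD_SCRATCHBUF_SIZE */
		int lens[2] = { 10, 40 };	/* two nocopy fragments */
		int cmd_pos = 0, copy_size = 0, i;

		for (i = 0; i < 2; i++) {
			/* the full fragment is mirrored into the payload */
			cmd_pos += lens[i];

			/* copy_size only grows to the scratch-buffer minimum */
			if (copy_size < scratch_min) {
				int copy = scratch_min - copy_size;

				if (copy > lens[i])
					copy = lens[i];
				copy_size += copy;
			}
		}
		/* ends with cmd_pos == 50, copy_size == 16 */
	}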
@@ -1345,7 +1362,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1345 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); 1362 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
1346 iwl_pcie_txq_build_tfd(trans, txq, 1363 iwl_pcie_txq_build_tfd(trans, txq,
1347 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), 1364 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
1348 scratch_size, 1); 1365 scratch_size, true);
1349 1366
1350 /* map first command fragment, if any remains */ 1367 /* map first command fragment, if any remains */
1351 if (copy_size > scratch_size) { 1368 if (copy_size > scratch_size) {
@@ -1361,7 +1378,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1361 } 1378 }
1362 1379
1363 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1380 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1364 copy_size - scratch_size, 0); 1381 copy_size - scratch_size, false);
1365 } 1382 }
1366 1383
1367 /* map the remaining (adjusted) nocopy/dup fragments */ 1384 /* map the remaining (adjusted) nocopy/dup fragments */
@@ -1384,7 +1401,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1384 goto out; 1401 goto out;
1385 } 1402 }
1386 1403
1387 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0); 1404 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1388 } 1405 }
1389 1406
1390 out_meta->flags = cmd->flags; 1407 out_meta->flags = cmd->flags;
@@ -1392,8 +1409,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1392 kfree(txq->entries[idx].free_buf); 1409 kfree(txq->entries[idx].free_buf);
1393 txq->entries[idx].free_buf = dup_buf; 1410 txq->entries[idx].free_buf = dup_buf;
1394 1411
1395 txq->need_update = 1;
1396
1397 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); 1412 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
1398 1413
1399 /* start timer if queue currently empty */ 1414 /* start timer if queue currently empty */
@@ -1405,9 +1420,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1405 /* 1420 /*
1406 * wake up the NIC to make sure that the firmware will see the host 1421 * wake up the NIC to make sure that the firmware will see the host
1407 * command - we will let the NIC sleep once all the host commands 1422 * command - we will let the NIC sleep once all the host commands
 1408 * have returned. 1423 * have returned. This needs to be done only on NICs that have
1424 * apmg_wake_up_wa set.
1409 */ 1425 */
1410 if (!trans_pcie->cmd_in_flight) { 1426 if (trans->cfg->base_params->apmg_wake_up_wa &&
1427 !trans_pcie->cmd_in_flight) {
1411 trans_pcie->cmd_in_flight = true; 1428 trans_pcie->cmd_in_flight = true;
1412 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1429 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1413 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1430 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1427,7 +1444,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1427 } 1444 }
1428 1445
1429 /* Increment and update queue's write index */ 1446 /* Increment and update queue's write index */
1430 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1447 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
1431 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1448 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1432 1449
1433 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1450 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1583,7 +1600,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1583 get_cmd_string(trans_pcie, cmd->id)); 1600 get_cmd_string(trans_pcie, cmd->id));
1584 ret = -ETIMEDOUT; 1601 ret = -ETIMEDOUT;
1585 1602
1586 iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1); 1603 iwl_force_nmi(trans);
1587 iwl_trans_fw_error(trans); 1604 iwl_trans_fw_error(trans);
1588 1605
1589 goto cancel; 1606 goto cancel;
@@ -1661,7 +1678,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1661 dma_addr_t tb0_phys, tb1_phys, scratch_phys; 1678 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1662 void *tb1_addr; 1679 void *tb1_addr;
1663 u16 len, tb1_len, tb2_len; 1680 u16 len, tb1_len, tb2_len;
1664 u8 wait_write_ptr = 0; 1681 bool wait_write_ptr;
1665 __le16 fc = hdr->frame_control; 1682 __le16 fc = hdr->frame_control;
1666 u8 hdr_len = ieee80211_hdrlen(fc); 1683 u8 hdr_len = ieee80211_hdrlen(fc);
1667 u16 wifi_seq; 1684 u16 wifi_seq;
@@ -1722,7 +1739,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1722 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, 1739 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
1723 IWL_HCMD_SCRATCHBUF_SIZE); 1740 IWL_HCMD_SCRATCHBUF_SIZE);
1724 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 1741 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
1725 IWL_HCMD_SCRATCHBUF_SIZE, 1); 1742 IWL_HCMD_SCRATCHBUF_SIZE, true);
1726 1743
1727 /* there must be data left over for TB1 or this code must be changed */ 1744 /* there must be data left over for TB1 or this code must be changed */
1728 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); 1745 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
@@ -1732,7 +1749,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1732 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 1749 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
1733 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 1750 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
1734 goto out_err; 1751 goto out_err;
1735 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0); 1752 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
1736 1753
1737 /* 1754 /*
1738 * Set up TFD's third entry to point directly to remainder 1755 * Set up TFD's third entry to point directly to remainder
@@ -1748,7 +1765,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1748 &txq->tfds[q->write_ptr]); 1765 &txq->tfds[q->write_ptr]);
1749 goto out_err; 1766 goto out_err;
1750 } 1767 }
1751 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0); 1768 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
1752 } 1769 }
1753 1770
1754 /* Set up entry for this TFD in Tx byte-count array */ 1771 /* Set up entry for this TFD in Tx byte-count array */
@@ -1762,12 +1779,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1762 trace_iwlwifi_dev_tx_data(trans->dev, skb, 1779 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1763 skb->data + hdr_len, tb2_len); 1780 skb->data + hdr_len, tb2_len);
1764 1781
1765 if (!ieee80211_has_morefrags(fc)) { 1782 wait_write_ptr = ieee80211_has_morefrags(fc);
1766 txq->need_update = 1;
1767 } else {
1768 wait_write_ptr = 1;
1769 txq->need_update = 0;
1770 }
1771 1783
1772 /* start timer if queue currently empty */ 1784 /* start timer if queue currently empty */
1773 if (txq->need_update && q->read_ptr == q->write_ptr && 1785 if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1775,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1775 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 1787 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1776 1788
1777 /* Tell device the write index *just past* this latest filled TFD */ 1789 /* Tell device the write index *just past* this latest filled TFD */
1778 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1790 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
1779 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1791 if (!wait_write_ptr)
1792 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1780 1793
1781 /* 1794 /*
1782 * At this point the frame is "transmitted" successfully 1795 * At this point the frame is "transmitted" successfully
1783 * and we will get a TX status notification eventually, 1796 * and we will get a TX status notification eventually.
1784 * regardless of the value of ret. "ret" only indicates
1785 * whether or not we should update the write pointer.
1786 */ 1797 */
1787 if (iwl_queue_space(q) < q->high_mark) { 1798 if (iwl_queue_space(q) < q->high_mark) {
1788 if (wait_write_ptr) { 1799 if (wait_write_ptr)
1789 txq->need_update = 1;
1790 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1800 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1791 } else { 1801 else
1792 iwl_stop_queue(trans, txq); 1802 iwl_stop_queue(trans, txq);
1793 }
1794 } 1803 }
1795 spin_unlock(&txq->lock); 1804 spin_unlock(&txq->lock);
1796 return 0; 1805 return 0;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 54e344aed6e0..47a998d8f99e 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1006,9 +1006,8 @@ struct cmd_key_material {
1006} __packed; 1006} __packed;
1007 1007
1008static int lbs_set_key_material(struct lbs_private *priv, 1008static int lbs_set_key_material(struct lbs_private *priv,
1009 int key_type, 1009 int key_type, int key_info,
1010 int key_info, 1010 const u8 *key, u16 key_len)
1011 u8 *key, u16 key_len)
1012{ 1011{
1013 struct cmd_key_material cmd; 1012 struct cmd_key_material cmd;
1014 int ret; 1013 int ret;
@@ -1610,7 +1609,7 @@ static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
1610 */ 1609 */
1611 1610
1612static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, 1611static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
1613 u8 *mac, struct station_info *sinfo) 1612 const u8 *mac, struct station_info *sinfo)
1614{ 1613{
1615 struct lbs_private *priv = wiphy_priv(wiphy); 1614 struct lbs_private *priv = wiphy_priv(wiphy);
1616 s8 signal, noise; 1615 s8 signal, noise;
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index ab966f08024a..407784aca627 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -90,7 +90,8 @@ do { if ((lbs_debug & (grp)) == (grp)) \
90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args) 90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
91 91
92#ifdef DEBUG 92#ifdef DEBUG
93static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len) 93static inline void lbs_deb_hex(unsigned int grp, const char *prompt,
94 const u8 *buf, int len)
94{ 95{
95 int i = 0; 96 int i = 0;
96 97
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index c7366b07b568..e446fed7b345 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -71,8 +71,10 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
71 71
72 skb->ip_summed = CHECKSUM_NONE; 72 skb->ip_summed = CHECKSUM_NONE;
73 73
74 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) 74 if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
75 return process_rxed_802_11_packet(priv, skb); 75 ret = process_rxed_802_11_packet(priv, skb);
76 goto done;
77 }
76 78
77 p_rx_pd = (struct rxpd *) skb->data; 79 p_rx_pd = (struct rxpd *) skb->data;
78 p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd + 80 p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
@@ -86,7 +88,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
86 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { 88 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
87 lbs_deb_rx("rx err: frame received with bad length\n"); 89 lbs_deb_rx("rx err: frame received with bad length\n");
88 dev->stats.rx_length_errors++; 90 dev->stats.rx_length_errors++;
89 ret = 0; 91 ret = -EINVAL;
90 dev_kfree_skb(skb); 92 dev_kfree_skb(skb);
91 goto done; 93 goto done;
92 } 94 }
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9d7a52f5a410..a312c653d116 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1676,7 +1676,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
1676 return 0; 1676 return 0;
1677} 1677}
1678 1678
1679static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1679static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
1680 struct ieee80211_vif *vif,
1681 u32 queues, bool drop)
1680{ 1682{
1681 /* Not implemented, queues only on kernel side */ 1683 /* Not implemented, queues only on kernel side */
1682} 1684}
@@ -2056,6 +2058,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
2056 WIPHY_FLAG_AP_UAPSD | 2058 WIPHY_FLAG_AP_UAPSD |
2057 WIPHY_FLAG_HAS_CHANNEL_SWITCH; 2059 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
2058 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 2060 hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
2061 hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
2059 2062
2060 /* ask mac80211 to reserve space for magic */ 2063 /* ask mac80211 to reserve space for magic */
2061 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 2064 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index c92f27aa71ed..706831df1fa2 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -212,8 +212,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
212 sizeof(struct mwifiex_ie_types_header)); 212 sizeof(struct mwifiex_ie_types_header));
213 memcpy((u8 *)vht_op + 213 memcpy((u8 *)vht_op +
214 sizeof(struct mwifiex_ie_types_header), 214 sizeof(struct mwifiex_ie_types_header),
215 (u8 *)bss_desc->bcn_vht_oper + 215 (u8 *)bss_desc->bcn_vht_oper,
216 sizeof(struct ieee_types_header),
217 le16_to_cpu(vht_op->header.len)); 216 le16_to_cpu(vht_op->header.len));
218 217
219 /* negotiate the channel width and central freq 218 /* negotiate the channel width and central freq
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index d14ead8beca8..e1c2f67ae85e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -345,8 +345,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
345 345
346 memcpy((u8 *) ht_info + 346 memcpy((u8 *) ht_info +
347 sizeof(struct mwifiex_ie_types_header), 347 sizeof(struct mwifiex_ie_types_header),
348 (u8 *) bss_desc->bcn_ht_oper + 348 (u8 *)bss_desc->bcn_ht_oper,
349 sizeof(struct ieee_types_header),
350 le16_to_cpu(ht_info->header.len)); 349 le16_to_cpu(ht_info->header.len));
351 350
352 if (!(sband->ht_cap.cap & 351 if (!(sband->ht_cap.cap &
@@ -750,3 +749,45 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
750 749
751 return; 750 return;
752} 751}
752
753u8 mwifiex_get_sec_chan_offset(int chan)
754{
755 u8 sec_offset;
756
757 switch (chan) {
758 case 36:
759 case 44:
760 case 52:
761 case 60:
762 case 100:
763 case 108:
764 case 116:
765 case 124:
766 case 132:
767 case 140:
768 case 149:
769 case 157:
770 sec_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
771 break;
772 case 40:
773 case 48:
774 case 56:
775 case 64:
776 case 104:
777 case 112:
778 case 120:
779 case 128:
780 case 136:
781 case 144:
782 case 153:
783 case 161:
784 sec_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
785 break;
786 case 165:
787 default:
788 sec_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
789 break;
790 }
791
792 return sec_offset;
793}
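The new helper encodes the fixed 40 MHz pairing of 5 GHz channels (36+40, 44+48, ..., 157+161): the lower channel of each pair reports SEC_ABOVE, the upper one SEC_BELOW, and 165, which has no partner, reports SEC_NONE. A hedged usage sketch; build_ht_param is hypothetical and not part of this patch:

	static u8 build_ht_param(int chan, bool use_40mhz)
	{
		/* no secondary channel when running at 20 MHz */
		if (!use_40mhz)
			return IEEE80211_HT_PARAM_CHA_SEC_NONE;

		return mwifiex_get_sec_chan_offset(chan);
	}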
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 40b007a00f4b..0b73fa08f5d4 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -63,6 +63,7 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
63 int cmd_action, 63 int cmd_action,
64 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl); 64 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
65void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra); 65void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
66u8 mwifiex_get_sec_chan_offset(int chan);
66 67
67static inline u8 68static inline u8
68mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv, 69mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
@@ -199,7 +200,7 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
199} 200}
200 201
201static inline u8 202static inline u8
202mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra) 203mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, const u8 *ra)
203{ 204{
204 struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra); 205 struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
205 if (node) 206 if (node)
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 63211707f939..5b32106182f8 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -100,6 +100,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
100 struct sk_buff *skb) 100 struct sk_buff *skb)
101{ 101{
102 struct txpd *local_tx_pd; 102 struct txpd *local_tx_pd;
103 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
103 104
104 skb_push(skb, sizeof(*local_tx_pd)); 105 skb_push(skb, sizeof(*local_tx_pd));
105 106
@@ -118,6 +119,9 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
118 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - 119 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
119 sizeof(*local_tx_pd)); 120 sizeof(*local_tx_pd));
120 121
122 if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
123 local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
124
121 if (local_tx_pd->tx_control == 0) 125 if (local_tx_pd->tx_control == 0)
122 /* TxCtrl set by user or default */ 126 /* TxCtrl set by user or default */
123 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); 127 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
@@ -160,6 +164,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
160 int pad = 0, ret; 164 int pad = 0, ret;
161 struct mwifiex_tx_param tx_param; 165 struct mwifiex_tx_param tx_param;
162 struct txpd *ptx_pd = NULL; 166 struct txpd *ptx_pd = NULL;
167 struct timeval tv;
163 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN; 168 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
164 169
165 skb_src = skb_peek(&pra_list->skb_head); 170 skb_src = skb_peek(&pra_list->skb_head);
@@ -182,8 +187,14 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
182 187
183 tx_info_aggr->bss_type = tx_info_src->bss_type; 188 tx_info_aggr->bss_type = tx_info_src->bss_type;
184 tx_info_aggr->bss_num = tx_info_src->bss_num; 189 tx_info_aggr->bss_num = tx_info_src->bss_num;
190
191 if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
192 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
185 skb_aggr->priority = skb_src->priority; 193 skb_aggr->priority = skb_src->priority;
186 194
195 do_gettimeofday(&tv);
196 skb_aggr->tstamp = timeval_to_ktime(tv);
197
187 do { 198 do {
188 /* Check if AMSDU can accommodate this MSDU */ 199 /* Check if AMSDU can accommodate this MSDU */
189 if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN)) 200 if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
@@ -236,18 +247,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
236 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA, 247 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
237 skb_aggr, NULL); 248 skb_aggr, NULL);
238 } else { 249 } else {
239 /* 250 if (skb_src)
240 * Padding per MSDU will affect the length of next 251 tx_param.next_pkt_len =
241 * packet and hence the exact length of next packet 252 skb_src->len + sizeof(struct txpd);
242 * is uncertain here. 253 else
243 * 254 tx_param.next_pkt_len = 0;
244 * Also, aggregation of transmission buffer, while
245 * downloading the data to the card, wont gain much
246 * on the AMSDU packets as the AMSDU packets utilizes
247 * the transmission buffer space to the maximum
248 * (adapter->tx_buf_size).
249 */
250 tx_param.next_pkt_len = 0;
251 255
252 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 256 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
253 skb_aggr, &tx_param); 257 skb_aggr, &tx_param);
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index b9242c3dca43..3b55ce5690a5 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -200,4 +200,11 @@ getlog
200 200
201 cat getlog 201 cat getlog
202 202
203fw_dump
204 This command is used to dump firmware memory into files.
 205 A separate file will be created for each memory segment.
206 Usage:
207
208 cat fw_dump
209
203=============================================================================== 210===============================================================================
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 21ee27ab7b74..e95dec91a561 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -994,7 +994,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
994 */ 994 */
995static int 995static int
996mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, 996mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
997 u8 *mac, struct station_info *sinfo) 997 const u8 *mac, struct station_info *sinfo)
998{ 998{
999 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 999 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1000 1000
@@ -1270,7 +1270,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
1270 */ 1270 */
1271static int 1271static int
1272mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, 1272mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1273 u8 *mac) 1273 const u8 *mac)
1274{ 1274{
1275 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1275 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1276 struct mwifiex_sta_node *sta_node; 1276 struct mwifiex_sta_node *sta_node;
@@ -2629,7 +2629,7 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
2629 */ 2629 */
2630static int 2630static int
2631mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, 2631mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2632 u8 *peer, u8 action_code, u8 dialog_token, 2632 const u8 *peer, u8 action_code, u8 dialog_token,
2633 u16 status_code, u32 peer_capability, 2633 u16 status_code, u32 peer_capability,
2634 const u8 *extra_ies, size_t extra_ies_len) 2634 const u8 *extra_ies, size_t extra_ies_len)
2635{ 2635{
@@ -2701,7 +2701,7 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2701 2701
2702static int 2702static int
2703mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, 2703mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
2704 u8 *peer, enum nl80211_tdls_operation action) 2704 const u8 *peer, enum nl80211_tdls_operation action)
2705{ 2705{
2706 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2706 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2707 2707
@@ -2748,9 +2748,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
2748} 2748}
2749 2749
2750static int 2750static int
2751mwifiex_cfg80211_add_station(struct wiphy *wiphy, 2751mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
2752 struct net_device *dev, 2752 const u8 *mac, struct station_parameters *params)
2753 u8 *mac, struct station_parameters *params)
2754{ 2753{
2755 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2754 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2756 2755
@@ -2765,9 +2764,9 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy,
2765} 2764}
2766 2765
2767static int 2766static int
2768mwifiex_cfg80211_change_station(struct wiphy *wiphy, 2767mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
2769 struct net_device *dev, 2768 const u8 *mac,
2770 u8 *mac, struct station_parameters *params) 2769 struct station_parameters *params)
2771{ 2770{
2772 int ret; 2771 int ret;
2773 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2772 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1062c918a7bf..8dee6c86f4f1 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -955,8 +955,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
955 adapter->cmd_wait_q.status = -ETIMEDOUT; 955 adapter->cmd_wait_q.status = -ETIMEDOUT;
956 wake_up_interruptible(&adapter->cmd_wait_q.wait); 956 wake_up_interruptible(&adapter->cmd_wait_q.wait);
957 mwifiex_cancel_pending_ioctl(adapter); 957 mwifiex_cancel_pending_ioctl(adapter);
958 /* reset cmd_sent flag to unblock new commands */
959 adapter->cmd_sent = false;
960 } 958 }
961 } 959 }
962 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) 960 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index b8a49aad12fd..7b419bbcd544 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -257,6 +257,29 @@ free_and_exit:
257} 257}
258 258
259/* 259/*
260 * Proc firmware dump read handler.
261 *
262 * This function is called when the 'fw_dump' file is opened for
263 * reading.
 264 * This function dumps firmware memory into different files
 265 * (e.g. DTCM, ITCM, SQRAM) based on the segments, for
266 * debugging.
267 */
268static ssize_t
269mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
270 size_t count, loff_t *ppos)
271{
272 struct mwifiex_private *priv = file->private_data;
273
274 if (!priv->adapter->if_ops.fw_dump)
275 return -EIO;
276
277 priv->adapter->if_ops.fw_dump(priv->adapter);
278
279 return 0;
280}
281
282/*
260 * Proc getlog file read handler. 283 * Proc getlog file read handler.
261 * 284 *
262 * This function is called when the 'getlog' file is opened for reading 285 * This function is called when the 'getlog' file is opened for reading
@@ -699,6 +722,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \
699MWIFIEX_DFS_FILE_READ_OPS(info); 722MWIFIEX_DFS_FILE_READ_OPS(info);
700MWIFIEX_DFS_FILE_READ_OPS(debug); 723MWIFIEX_DFS_FILE_READ_OPS(debug);
701MWIFIEX_DFS_FILE_READ_OPS(getlog); 724MWIFIEX_DFS_FILE_READ_OPS(getlog);
725MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
702MWIFIEX_DFS_FILE_OPS(regrdwr); 726MWIFIEX_DFS_FILE_OPS(regrdwr);
703MWIFIEX_DFS_FILE_OPS(rdeeprom); 727MWIFIEX_DFS_FILE_OPS(rdeeprom);
704 728
@@ -722,6 +746,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
722 MWIFIEX_DFS_ADD_FILE(getlog); 746 MWIFIEX_DFS_ADD_FILE(getlog);
723 MWIFIEX_DFS_ADD_FILE(regrdwr); 747 MWIFIEX_DFS_ADD_FILE(regrdwr);
724 MWIFIEX_DFS_ADD_FILE(rdeeprom); 748 MWIFIEX_DFS_ADD_FILE(rdeeprom);
749 MWIFIEX_DFS_ADD_FILE(fw_dump);
725} 750}
726 751
727/* 752/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e7b3e16e5d34..38da6ff6f416 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -42,12 +42,12 @@
42#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2 42#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
43#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16 43#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
44 44
45#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 16 45#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64
46#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 32 46#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64
47#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32 47#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32
48#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16 48#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16
49#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 32 49#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 64
50#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 48 50#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 64
51#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48 51#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48
52#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32 52#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32
53 53
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index b485dc1ae5eb..3175dd04834b 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,6 +169,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
169#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146) 169#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
170#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) 170#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
171#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) 171#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
172#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
172#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199) 173#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
173 174
174#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 175#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -229,6 +230,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
229#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8)) 230#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
230#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22)) 231#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
231#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30)) 232#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
233#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2))
232 234
233/* httxcfg bitmap 235/* httxcfg bitmap
234 * 0 reserved 236 * 0 reserved
@@ -403,7 +405,7 @@ enum P2P_MODES {
403#define HS_CFG_CANCEL 0xffffffff 405#define HS_CFG_CANCEL 0xffffffff
404#define HS_CFG_COND_DEF 0x00000000 406#define HS_CFG_COND_DEF 0x00000000
405#define HS_CFG_GPIO_DEF 0xff 407#define HS_CFG_GPIO_DEF 0xff
406#define HS_CFG_GAP_DEF 0 408#define HS_CFG_GAP_DEF 0xff
407#define HS_CFG_COND_BROADCAST_DATA 0x00000001 409#define HS_CFG_COND_BROADCAST_DATA 0x00000001
408#define HS_CFG_COND_UNICAST_DATA 0x00000002 410#define HS_CFG_COND_UNICAST_DATA 0x00000002
409#define HS_CFG_COND_MAC_EVENT 0x00000004 411#define HS_CFG_COND_MAC_EVENT 0x00000004
@@ -487,6 +489,7 @@ enum P2P_MODES {
487#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c 489#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
488#define EVENT_HOSTWAKE_STAIE 0x0000004d 490#define EVENT_HOSTWAKE_STAIE 0x0000004d
489#define EVENT_CHANNEL_SWITCH_ANN 0x00000050 491#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
492#define EVENT_TDLS_GENERIC_EVENT 0x00000052
490#define EVENT_EXT_SCAN_REPORT 0x00000058 493#define EVENT_EXT_SCAN_REPORT 0x00000058
491#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f 494#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
492 495
@@ -519,6 +522,7 @@ enum P2P_MODES {
519#define ACT_TDLS_DELETE 0x00 522#define ACT_TDLS_DELETE 0x00
520#define ACT_TDLS_CREATE 0x01 523#define ACT_TDLS_CREATE 0x01
521#define ACT_TDLS_CONFIG 0x02 524#define ACT_TDLS_CONFIG 0x02
525#define TDLS_EVENT_LINK_TEAR_DOWN 3
522 526
523#define MWIFIEX_FW_V15 15 527#define MWIFIEX_FW_V15 15
524 528
@@ -535,6 +539,7 @@ struct mwifiex_ie_types_data {
535#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01 539#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
536#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08 540#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
537#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10 541#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
542#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01
538 543
539struct txpd { 544struct txpd {
540 u8 bss_type; 545 u8 bss_type;
@@ -577,7 +582,7 @@ struct rxpd {
577 * [Bit 7] Reserved 582 * [Bit 7] Reserved
578 */ 583 */
579 u8 ht_info; 584 u8 ht_info;
580 u8 reserved; 585 u8 flags;
581} __packed; 586} __packed;
582 587
583struct uap_txpd { 588struct uap_txpd {
@@ -708,6 +713,13 @@ struct mwifiex_ie_types_vendor_param_set {
708 u8 ie[MWIFIEX_MAX_VSIE_LEN]; 713 u8 ie[MWIFIEX_MAX_VSIE_LEN];
709}; 714};
710 715
716#define MWIFIEX_TDLS_IDLE_TIMEOUT 60
717
718struct mwifiex_ie_types_tdls_idle_timeout {
719 struct mwifiex_ie_types_header header;
720 __le16 value;
721} __packed;
722
711struct mwifiex_ie_types_rsn_param_set { 723struct mwifiex_ie_types_rsn_param_set {
712 struct mwifiex_ie_types_header header; 724 struct mwifiex_ie_types_header header;
713 u8 rsn_ie[1]; 725 u8 rsn_ie[1];
@@ -1745,6 +1757,15 @@ struct host_cmd_ds_802_11_subsc_evt {
1745 __le16 events; 1757 __le16 events;
1746} __packed; 1758} __packed;
1747 1759
1760struct mwifiex_tdls_generic_event {
1761 __le16 type;
1762 u8 peer_mac[ETH_ALEN];
1763 union {
1764 __le16 reason_code;
1765 __le16 reserved;
1766 } u;
1767} __packed;
1768
1748struct mwifiex_ie { 1769struct mwifiex_ie {
1749 __le16 ie_index; 1770 __le16 ie_index;
1750 __le16 mgmt_subtype_mask; 1771 __le16 mgmt_subtype_mask;
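The new TDLS idle-timeout TLV is filled like any other mwifiex TLV: type and length go in the little-endian header, followed by the 16-bit timeout value. An illustrative sketch, assuming the usual mwifiex header layout (__le16 type, __le16 len); the helper name is hypothetical:

	static u8 *append_tdls_idle_timeout(u8 *pos)
	{
		struct mwifiex_ie_types_tdls_idle_timeout *timeout = (void *)pos;

		timeout->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
		timeout->header.len = cpu_to_le16(sizeof(timeout->value));
		timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);

		/* advance past the TLV just written */
		return pos + sizeof(*timeout);
	}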
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index ee494db54060..1b576722671d 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -303,7 +303,7 @@ struct mwifiex_ds_ant_cfg {
303 u32 rx_ant; 303 u32 rx_ant;
304}; 304};
305 305
306#define MWIFIEX_NUM_OF_CMD_BUFFER 20 306#define MWIFIEX_NUM_OF_CMD_BUFFER 50
307#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048 307#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
308 308
309enum { 309enum {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c771b3e9918..cbabc12fbda3 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -521,7 +521,6 @@ done:
521 release_firmware(adapter->firmware); 521 release_firmware(adapter->firmware);
522 adapter->firmware = NULL; 522 adapter->firmware = NULL;
523 } 523 }
524 complete(&adapter->fw_load);
525 if (init_failed) 524 if (init_failed)
526 mwifiex_free_adapter(adapter); 525 mwifiex_free_adapter(adapter);
527 up(sem); 526 up(sem);
@@ -535,7 +534,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
535{ 534{
536 int ret; 535 int ret;
537 536
538 init_completion(&adapter->fw_load);
539 ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name, 537 ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
540 adapter->dev, GFP_KERNEL, adapter, 538 adapter->dev, GFP_KERNEL, adapter,
541 mwifiex_fw_dpc); 539 mwifiex_fw_dpc);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d53e1e8c9467..1398afa84064 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -672,6 +672,7 @@ struct mwifiex_if_ops {
672 int (*init_fw_port) (struct mwifiex_adapter *); 672 int (*init_fw_port) (struct mwifiex_adapter *);
673 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); 673 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
674 void (*card_reset) (struct mwifiex_adapter *); 674 void (*card_reset) (struct mwifiex_adapter *);
675 void (*fw_dump)(struct mwifiex_adapter *);
675 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); 676 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
676}; 677};
677 678
@@ -787,7 +788,6 @@ struct mwifiex_adapter {
787 struct mwifiex_wait_queue cmd_wait_q; 788 struct mwifiex_wait_queue cmd_wait_q;
788 u8 scan_wait_q_woken; 789 u8 scan_wait_q_woken;
789 spinlock_t queue_lock; /* lock for tx queues */ 790 spinlock_t queue_lock; /* lock for tx queues */
790 struct completion fw_load;
791 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 791 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
792 u16 max_mgmt_ie_index; 792 u16 max_mgmt_ie_index;
793 u8 scan_delay_cnt; 793 u8 scan_delay_cnt;
@@ -910,8 +910,6 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
910 struct sk_buff *skb); 910 struct sk_buff *skb);
911int mwifiex_process_sta_event(struct mwifiex_private *); 911int mwifiex_process_sta_event(struct mwifiex_private *);
912int mwifiex_process_uap_event(struct mwifiex_private *); 912int mwifiex_process_uap_event(struct mwifiex_private *);
913struct mwifiex_sta_node *
914mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
915void mwifiex_delete_all_station_list(struct mwifiex_private *priv); 913void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
916void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); 914void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
917void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb); 915void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
@@ -1101,7 +1099,7 @@ mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv)
1101 return 0; 1099 return 0;
1102 1100
1103 /* Clear csa channel, if DFS channel move time has passed */ 1101 /* Clear csa channel, if DFS channel move time has passed */
1104 if (jiffies > priv->csa_expire_time) { 1102 if (time_after(jiffies, priv->csa_expire_time)) {
1105 priv->csa_chan = 0; 1103 priv->csa_chan = 0;
1106 priv->csa_expire_time = 0; 1104 priv->csa_expire_time = 0;
1107 } 1105 }
@@ -1220,26 +1218,26 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
1220extern const struct ethtool_ops mwifiex_ethtool_ops; 1218extern const struct ethtool_ops mwifiex_ethtool_ops;
1221 1219
1222void mwifiex_del_all_sta_list(struct mwifiex_private *priv); 1220void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
1223void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac); 1221void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac);
1224void 1222void
1225mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies, 1223mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
1226 int ies_len, struct mwifiex_sta_node *node); 1224 int ies_len, struct mwifiex_sta_node *node);
1227struct mwifiex_sta_node * 1225struct mwifiex_sta_node *
1228mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac); 1226mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
1229struct mwifiex_sta_node * 1227struct mwifiex_sta_node *
1230mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac); 1228mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
1231int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer, 1229int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
1232 u8 action_code, u8 dialog_token, 1230 u8 action_code, u8 dialog_token,
1233 u16 status_code, const u8 *extra_ies, 1231 u16 status_code, const u8 *extra_ies,
1234 size_t extra_ies_len); 1232 size_t extra_ies_len);
1235int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, 1233int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
1236 u8 *peer, u8 action_code, u8 dialog_token, 1234 u8 action_code, u8 dialog_token,
1237 u16 status_code, const u8 *extra_ies, 1235 u16 status_code, const u8 *extra_ies,
1238 size_t extra_ies_len); 1236 size_t extra_ies_len);
1239void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, 1237void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1240 u8 *buf, int len); 1238 u8 *buf, int len);
1241int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action); 1239int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
1242int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac); 1240int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac);
1243void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv); 1241void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
1244bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv); 1242bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
1245u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band, 1243u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
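The time_after() change above fixes a jiffies-wraparound bug: a direct "jiffies > priv->csa_expire_time" comparison gives the wrong answer once the jiffies counter wraps, while time_after() compares in wrap-safe signed arithmetic. A minimal sketch of the safe pattern:

	#include <linux/jiffies.h>

	static bool csa_window_expired(unsigned long csa_expire_time)
	{
		/* correct across jiffies wraparound, unlike a plain '>' */
		return time_after(jiffies, csa_expire_time);
	}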
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a7e8b96b2d90..574d4b597468 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -221,9 +221,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
221 if (!adapter || !adapter->priv_num) 221 if (!adapter || !adapter->priv_num)
222 return; 222 return;
223 223
224 /* In case driver is removed when asynchronous FW load is in progress */
225 wait_for_completion(&adapter->fw_load);
226
227 if (user_rmmod) { 224 if (user_rmmod) {
228#ifdef CONFIG_PM_SLEEP 225#ifdef CONFIG_PM_SLEEP
229 if (adapter->is_suspended) 226 if (adapter->is_suspended)
@@ -1074,6 +1071,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
1074 * is mapped to PCI device memory. Tx ring pointers are advanced accordingly. 1071 * is mapped to PCI device memory. Tx ring pointers are advanced accordingly.
 1075 * Download ready interrupt to FW is deferred if Tx ring is not full and 1072 * Download ready interrupt to FW is deferred if Tx ring is not full and
 1076 * additional payload can be accommodated. 1073 * additional payload can be accommodated.
 1074 * Caller must ensure the tx_param passed to this function is not NULL.
1077 */ 1075 */
1078static int 1076static int
1079mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, 1077mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 7b3af3d29ded..45c5b3450cf5 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -29,9 +29,6 @@
29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14 29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
30 30
31#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4 31#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
32#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
33#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
34#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
35 32
36/* Memory needed to store a max sized Channel List TLV for a firmware scan */ 33/* Memory needed to store a max sized Channel List TLV for a firmware scan */
37#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \ 34#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -1055,20 +1052,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
1055 1052
1056 /* 1053 /*
1057 * In associated state we will reduce the number of channels scanned per 1054 * In associated state we will reduce the number of channels scanned per
1058 * scan command to avoid any traffic delay/loss. This number is decided 1055 * scan command to 1 to avoid any traffic delay/loss.
1059 * based on total number of channels to be scanned due to constraints
1060 * of command buffers.
1061 */ 1056 */
1062 if (priv->media_connected) { 1057 if (priv->media_connected)
1063 if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
1064 *max_chan_per_scan = 1; 1058 *max_chan_per_scan = 1;
1065 else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
1066 *max_chan_per_scan = 2;
1067 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
1068 *max_chan_per_scan = 3;
1069 else
1070 *max_chan_per_scan = 4;
1071 }
1072} 1059}
1073 1060
1074/* 1061/*
@@ -1353,23 +1340,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1353 bss_entry->beacon_buf); 1340 bss_entry->beacon_buf);
1354 break; 1341 break;
1355 case WLAN_EID_BSS_COEX_2040: 1342 case WLAN_EID_BSS_COEX_2040:
1356 bss_entry->bcn_bss_co_2040 = current_ptr + 1343 bss_entry->bcn_bss_co_2040 = current_ptr;
1357 sizeof(struct ieee_types_header); 1344 bss_entry->bss_co_2040_offset =
1358 bss_entry->bss_co_2040_offset = (u16) (current_ptr + 1345 (u16) (current_ptr - bss_entry->beacon_buf);
1359 sizeof(struct ieee_types_header) -
1360 bss_entry->beacon_buf);
1361 break; 1346 break;
1362 case WLAN_EID_EXT_CAPABILITY: 1347 case WLAN_EID_EXT_CAPABILITY:
1363 bss_entry->bcn_ext_cap = current_ptr + 1348 bss_entry->bcn_ext_cap = current_ptr;
1364 sizeof(struct ieee_types_header); 1349 bss_entry->ext_cap_offset =
1365 bss_entry->ext_cap_offset = (u16) (current_ptr + 1350 (u16) (current_ptr - bss_entry->beacon_buf);
1366 sizeof(struct ieee_types_header) -
1367 bss_entry->beacon_buf);
1368 break; 1351 break;
1369 case WLAN_EID_OPMODE_NOTIF: 1352 case WLAN_EID_OPMODE_NOTIF:
1370 bss_entry->oper_mode = 1353 bss_entry->oper_mode = (void *)current_ptr;
1371 (void *)(current_ptr +
1372 sizeof(struct ieee_types_header));
1373 bss_entry->oper_mode_offset = 1354 bss_entry->oper_mode_offset =
1374 (u16)((u8 *)bss_entry->oper_mode - 1355 (u16)((u8 *)bss_entry->oper_mode -
1375 bss_entry->beacon_buf); 1356 bss_entry->beacon_buf);
@@ -1757,6 +1738,19 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
1757 return 0; 1738 return 0;
1758} 1739}
1759 1740
1741static void mwifiex_complete_scan(struct mwifiex_private *priv)
1742{
1743 struct mwifiex_adapter *adapter = priv->adapter;
1744
1745 if (adapter->curr_cmd->wait_q_enabled) {
1746 adapter->cmd_wait_q.status = 0;
1747 if (!priv->scan_request) {
1748 dev_dbg(adapter->dev, "complete internal scan\n");
1749 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
1750 }
1751 }
1752}
1753
1760static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) 1754static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
1761{ 1755{
1762 struct mwifiex_adapter *adapter = priv->adapter; 1756 struct mwifiex_adapter *adapter = priv->adapter;
@@ -1770,16 +1764,9 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
1770 adapter->scan_processing = false; 1764 adapter->scan_processing = false;
1771 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 1765 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
1772 1766
1773 /* Need to indicate IOCTL complete */ 1767 if (!adapter->ext_scan)
1774 if (adapter->curr_cmd->wait_q_enabled) { 1768 mwifiex_complete_scan(priv);
1775 adapter->cmd_wait_q.status = 0; 1769
1776 if (!priv->scan_request) {
1777 dev_dbg(adapter->dev,
1778 "complete internal scan\n");
1779 mwifiex_complete_cmd(adapter,
1780 adapter->curr_cmd);
1781 }
1782 }
1783 if (priv->report_scan_result) 1770 if (priv->report_scan_result)
1784 priv->report_scan_result = false; 1771 priv->report_scan_result = false;
1785 1772
@@ -1984,6 +1971,9 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
1984int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv) 1971int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
1985{ 1972{
1986 dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n"); 1973 dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
1974
1975 mwifiex_complete_scan(priv);
1976
1987 return 0; 1977 return 0;
1988} 1978}
1989 1979
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d206f04d4994..4ce3d7b33991 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -85,6 +85,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
85 card->supports_sdio_new_mode = data->supports_sdio_new_mode; 85 card->supports_sdio_new_mode = data->supports_sdio_new_mode;
86 card->has_control_mask = data->has_control_mask; 86 card->has_control_mask = data->has_control_mask;
87 card->tx_buf_size = data->tx_buf_size; 87 card->tx_buf_size = data->tx_buf_size;
88 card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
89 card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
88 } 90 }
89 91
90 sdio_claim_host(func); 92 sdio_claim_host(func);
@@ -177,9 +179,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
177 if (!adapter || !adapter->priv_num) 179 if (!adapter || !adapter->priv_num)
178 return; 180 return;
179 181
180 /* In case driver is removed when asynchronous FW load is in progress */
181 wait_for_completion(&adapter->fw_load);
182
183 if (user_rmmod) { 182 if (user_rmmod) {
184 if (adapter->is_suspended) 183 if (adapter->is_suspended)
185 mwifiex_sdio_resume(adapter->dev); 184 mwifiex_sdio_resume(adapter->dev);
@@ -1679,8 +1678,12 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
1679 if (ret) { 1678 if (ret) {
1680 if (type == MWIFIEX_TYPE_CMD) 1679 if (type == MWIFIEX_TYPE_CMD)
1681 adapter->cmd_sent = false; 1680 adapter->cmd_sent = false;
1682 if (type == MWIFIEX_TYPE_DATA) 1681 if (type == MWIFIEX_TYPE_DATA) {
1683 adapter->data_sent = false; 1682 adapter->data_sent = false;
1683 /* restore curr_wr_port in error cases */
1684 card->curr_wr_port = port;
1685 card->mp_wr_bitmap |= (u32)(1 << card->curr_wr_port);
1686 }
1684 } else { 1687 } else {
1685 if (type == MWIFIEX_TYPE_DATA) { 1688 if (type == MWIFIEX_TYPE_DATA) {
1686 if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port))) 1689 if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
@@ -1842,8 +1845,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1842 card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) * 1845 card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
1843 card->mp_agg_pkt_limit, GFP_KERNEL); 1846 card->mp_agg_pkt_limit, GFP_KERNEL);
1844 ret = mwifiex_alloc_sdio_mpa_buffers(adapter, 1847 ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
1845 SDIO_MP_TX_AGGR_DEF_BUF_SIZE, 1848 card->mp_tx_agg_buf_size,
1846 SDIO_MP_RX_AGGR_DEF_BUF_SIZE); 1849 card->mp_rx_agg_buf_size);
1847 if (ret) { 1850 if (ret) {
1848 dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n"); 1851 dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
1849 kfree(card->mp_regs); 1852 kfree(card->mp_regs);
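The host_to_card error path above fixes a write-port leak: a data port is claimed by clearing its bit in mp_wr_bitmap, so a failed write must hand the port back. A hedged sketch of the claim/restore pair, with illustrative helper names (not from this patch):

	static void sdio_claim_wr_port(struct sdio_mmc_card *card, u32 *port)
	{
		*port = card->curr_wr_port;
		card->mp_wr_bitmap &= ~((u32)1 << *port);	/* port in use */
	}

	static void sdio_restore_wr_port(struct sdio_mmc_card *card, u32 port)
	{
		card->curr_wr_port = port;
		card->mp_wr_bitmap |= (u32)1 << port;		/* port free again */
	}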
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index c71201b2e2a3..6eea30b43ed7 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -64,10 +64,8 @@
 #define UP_LD_CMD_PORT_HOST_INT_STATUS    (0x40U)
 #define DN_LD_CMD_PORT_HOST_INT_STATUS    (0x80U)

-#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE    (8192)    /* 8K */
-
-/* Multi port RX aggregation buffer size */
-#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE    (16384)    /* 16K */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_16K    (16384)
+#define MWIFIEX_MP_AGGR_BUF_SIZE_32K    (32768)

 /* Misc. Config Register : Auto Re-enable interrupts */
 #define AUTO_RE_ENABLE_INT    BIT(4)
@@ -234,6 +232,8 @@ struct sdio_mmc_card {
     bool supports_sdio_new_mode;
     bool has_control_mask;
     u16 tx_buf_size;
+    u32 mp_tx_agg_buf_size;
+    u32 mp_rx_agg_buf_size;

     u32 mp_rd_bitmap;
     u32 mp_wr_bitmap;
@@ -258,6 +258,8 @@ struct mwifiex_sdio_device {
     bool supports_sdio_new_mode;
     bool has_control_mask;
     u16 tx_buf_size;
+    u32 mp_tx_agg_buf_size;
+    u32 mp_rx_agg_buf_size;
 };

 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -315,6 +317,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
     .supports_sdio_new_mode = false,
     .has_control_mask = true,
     .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+    .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+    .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };

 static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -325,6 +329,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
     .supports_sdio_new_mode = false,
     .has_control_mask = true,
     .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+    .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+    .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };

 static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -335,6 +341,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
     .supports_sdio_new_mode = false,
     .has_control_mask = true,
     .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+    .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+    .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };

 static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -345,6 +353,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
     .supports_sdio_new_mode = true,
     .has_control_mask = false,
     .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+    .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+    .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
 };

 /*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index e3cac1495cc7..88202ce0c139 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1546,6 +1546,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
     struct mwifiex_ie_types_extcap *extcap;
     struct mwifiex_ie_types_vhtcap *vht_capab;
     struct mwifiex_ie_types_aid *aid;
+    struct mwifiex_ie_types_tdls_idle_timeout *timeout;
     u8 *pos, qos_info;
     u16 config_len = 0;
     struct station_parameters *params = priv->sta_params;
@@ -1643,6 +1644,12 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
             config_len += sizeof(struct mwifiex_ie_types_aid);
         }

+        timeout = (void *)(pos + config_len);
+        timeout->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
+        timeout->header.len = cpu_to_le16(sizeof(timeout->value));
+        timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);
+        config_len += sizeof(struct mwifiex_ie_types_tdls_idle_timeout);
+
         break;
     default:
         dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
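The idle-timeout block added above follows the usual TLV pattern: write a little-endian type/length header at the running offset, then the value, then advance the offset. A self-contained sketch of that pattern; the names are illustrative, and le16() stands in for cpu_to_le16() under the assumption of a little-endian host:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct tlv_hdr {
    uint16_t type;
    uint16_t len;
} __attribute__((packed));

static uint16_t le16(uint16_t v) { return v; }  /* assume little-endian host */

/* Append one u16-valued TLV at 'off'; returns the new running length. */
static size_t tlv_append_u16(uint8_t *buf, size_t off,
                             uint16_t type, uint16_t value)
{
    struct tlv_hdr hdr = { .type = le16(type), .len = le16(sizeof(value)) };

    memcpy(buf + off, &hdr, sizeof(hdr));
    value = le16(value);
    memcpy(buf + off + sizeof(hdr), &value, sizeof(value));
    return off + sizeof(hdr) + sizeof(value);
}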
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index bfebb0144df5..577f2979ed8f 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -865,14 +865,20 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,

     switch (action) {
     case ACT_TDLS_DELETE:
-        if (reason)
-            dev_err(priv->adapter->dev,
-                "TDLS link delete for %pM failed: reason %d\n",
-                cmd_tdls_oper->peer_mac, reason);
-        else
+        if (reason) {
+            if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
+                dev_dbg(priv->adapter->dev,
+                    "TDLS link delete for %pM failed: reason %d\n",
+                    cmd_tdls_oper->peer_mac, reason);
+            else
+                dev_err(priv->adapter->dev,
+                    "TDLS link delete for %pM failed: reason %d\n",
+                    cmd_tdls_oper->peer_mac, reason);
+        } else {
             dev_dbg(priv->adapter->dev,
-                "TDLS link config for %pM successful\n",
+                "TDLS link delete for %pM successful\n",
                 cmd_tdls_oper->peer_mac);
+        }
         break;
     case ACT_TDLS_CREATE:
         if (reason) {
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 368450cc56c7..f6395ef11a72 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -134,6 +134,46 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
     netif_carrier_off(priv->netdev);
 }

+static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
+                                    struct sk_buff *event_skb)
+{
+    int ret = 0;
+    struct mwifiex_adapter *adapter = priv->adapter;
+    struct mwifiex_sta_node *sta_ptr;
+    struct mwifiex_tdls_generic_event *tdls_evt =
+            (void *)event_skb->data + sizeof(adapter->event_cause);
+
+    /* reserved 2 bytes are not mandatory in tdls event */
+    if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
+                          sizeof(u16) - sizeof(adapter->event_cause))) {
+        dev_err(adapter->dev, "Invalid event length!\n");
+        return -1;
+    }
+
+    sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
+    if (!sta_ptr) {
+        dev_err(adapter->dev, "cannot get sta entry!\n");
+        return -1;
+    }
+
+    switch (le16_to_cpu(tdls_evt->type)) {
+    case TDLS_EVENT_LINK_TEAR_DOWN:
+        cfg80211_tdls_oper_request(priv->netdev,
+                                   tdls_evt->peer_mac,
+                                   NL80211_TDLS_TEARDOWN,
+                                   le16_to_cpu(tdls_evt->u.reason_code),
+                                   GFP_KERNEL);
+        ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac,
+                                MWIFIEX_TDLS_DISABLE_LINK);
+        queue_work(adapter->workqueue, &adapter->main_work);
+        break;
+    default:
+        break;
+    }
+
+    return ret;
+}
+
 /*
  * This function handles events generated by firmware.
  *
@@ -459,6 +499,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                       false);
         break;

+    case EVENT_TDLS_GENERIC_EVENT:
+        ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
+        break;
+
     default:
         dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
             eventcause);
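mwifiex_parse_tdls_event() above checks the buffer length before dereferencing the payload, while still allowing the optional trailing field to be absent. A hedged userspace sketch of the same validate-before-cast check, with an illustrative layout (not the firmware's actual one):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct tdls_event {
    uint16_t type;
    uint8_t  peer_mac[6];
    uint16_t reason_code;
    uint16_t reserved;      /* optional trailing field on the wire */
} __attribute__((packed));

/* Only the mandatory prefix is required; reject anything shorter. */
static int parse_event(const uint8_t *data, size_t len,
                       struct tdls_event *out)
{
    const size_t mandatory = offsetof(struct tdls_event, reserved);

    if (len < mandatory)
        return -1;          /* malformed: refuse to read past 'len' */
    memset(out, 0, sizeof(*out));
    memcpy(out, data, len < sizeof(*out) ? len : sizeof(*out));
    return 0;
}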
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index ed26387eccf5..8b639d7fe6df 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -183,6 +183,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
     struct rx_packet_hdr *rx_pkt_hdr;
     u8 ta[ETH_ALEN];
     u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
+    struct mwifiex_sta_node *sta_ptr;

     local_rx_pd = (struct rxpd *) (skb->data);
     rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -213,14 +214,25 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
      * If the packet is not an unicast packet then send the packet
      * directly to os. Don't pass thru rx reordering
      */
-    if (!IS_11N_ENABLED(priv) ||
+    if ((!IS_11N_ENABLED(priv) &&
+         !(ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+           !(local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET))) ||
         !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
         mwifiex_process_rx_packet(priv, skb);
         return ret;
     }

-    if (mwifiex_queuing_ra_based(priv)) {
+    if (mwifiex_queuing_ra_based(priv) ||
+        (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+         local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET)) {
         memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+        if (local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET &&
+            local_rx_pd->priority < MAX_NUM_TID) {
+            sta_ptr = mwifiex_get_sta_entry(priv, ta);
+            if (sta_ptr)
+                sta_ptr->rx_seq[local_rx_pd->priority] =
+                    le16_to_cpu(local_rx_pd->seq_num);
+        }
     } else {
         if (rx_pkt_type != PKT_TYPE_BAR)
             priv->rx_seq[local_rx_pd->priority] = seq_num;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 1236a5de7bca..5fce7e78a36e 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -128,6 +128,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
 {
     struct mwifiex_adapter *adapter = priv->adapter;
     struct txpd *local_tx_pd;
+    struct mwifiex_tx_param tx_param;
 /* sizeof(struct txpd) + Interface specific header */
 #define NULL_PACKET_HDR 64
     u32 data_len = NULL_PACKET_HDR;
@@ -168,8 +169,9 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
                                            skb, NULL);
     } else {
         skb_push(skb, INTF_HEADER_LEN);
+        tx_param.next_pkt_len = 0;
         ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
-                                           skb, NULL);
+                                           skb, &tx_param);
     }
     switch (ret) {
     case -EBUSY:
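The change above stops passing NULL for the tx parameter: interface backends can dereference it (for example, to see whether another packet follows for aggregation), so the caller now hands in an explicitly initialized struct. A condensed sketch of that contract with hypothetical names:

#include <stddef.h>

/* Hypothetical mirror of the host_to_card contract: the backend reads
 * tx_param unconditionally, so callers must never pass NULL. */
struct tx_param {
    unsigned int next_pkt_len;  /* 0: nothing queued behind this frame */
};

static int backend_send(const void *buf, size_t len,
                        const struct tx_param *tp)
{
    (void)buf; (void)len;
    /* no NULL check here: an uninitialized or NULL tp would be a bug */
    return tp->next_pkt_len ? 1 : 0;    /* 1: keep aggregation open */
}

static int send_null_frame(const void *buf, size_t len)
{
    struct tx_param tp = { .next_pkt_len = 0 };  /* explicit init */
    return backend_send(buf, len, &tp);
}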
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 97662a1ba58c..e73034fbbde9 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -25,8 +25,8 @@
 #define TDLS_RESP_FIX_LEN     8
 #define TDLS_CONFIRM_FIX_LEN  6

-static void
-mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
+                                         const u8 *mac, u8 status)
 {
     struct mwifiex_ra_list_tbl *ra_list;
     struct list_head *tid_list;
@@ -84,7 +84,8 @@ mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
     return;
 }

-static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
+                                      const u8 *mac)
 {
     struct mwifiex_ra_list_tbl *ra_list;
     struct list_head *ra_list_head;
@@ -185,8 +186,50 @@ static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
     return 0;
 }

+static int
+mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
+                         u8 vht_enabled, struct sk_buff *skb)
+{
+    struct ieee80211_ht_operation *ht_oper;
+    struct mwifiex_sta_node *sta_ptr;
+    struct mwifiex_bssdescriptor *bss_desc =
+                    &priv->curr_bss_params.bss_descriptor;
+    u8 *pos;
+
+    sta_ptr = mwifiex_get_sta_entry(priv, mac);
+    if (unlikely(!sta_ptr)) {
+        dev_warn(priv->adapter->dev,
+                 "TDLS peer station not found in list\n");
+        return -1;
+    }
+
+    pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
+    *pos++ = WLAN_EID_HT_OPERATION;
+    *pos++ = sizeof(struct ieee80211_ht_operation);
+    ht_oper = (void *)pos;
+
+    ht_oper->primary_chan = bss_desc->channel;
+
+    /* follow AP's channel bandwidth */
+    if (ISSUPP_CHANWIDTH40(priv->adapter->hw_dot_11n_dev_cap) &&
+        bss_desc->bcn_ht_cap &&
+        ISALLOWED_CHANWIDTH40(bss_desc->bcn_ht_oper->ht_param))
+        ht_oper->ht_param = bss_desc->bcn_ht_oper->ht_param;
+
+    if (vht_enabled) {
+        ht_oper->ht_param =
+              mwifiex_get_sec_chan_offset(bss_desc->channel);
+        ht_oper->ht_param |= BIT(2);
+    }
+
+    memcpy(&sta_ptr->tdls_cap.ht_oper, ht_oper,
+           sizeof(struct ieee80211_ht_operation));
+
+    return 0;
+}
+
 static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
-                                     u8 *mac, struct sk_buff *skb)
+                                     const u8 *mac, struct sk_buff *skb)
 {
     struct mwifiex_bssdescriptor *bss_desc;
     struct ieee80211_vht_operation *vht_oper;
@@ -325,8 +368,9 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
 }

 static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
-                       u8 *peer, u8 action_code, u8 dialog_token,
-                       u16 status_code, struct sk_buff *skb)
+                                        const u8 *peer, u8 action_code,
+                                        u8 dialog_token,
+                                        u16 status_code, struct sk_buff *skb)
 {
     struct ieee80211_tdls_data *tf;
     int ret;
@@ -428,6 +472,17 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
             dev_kfree_skb_any(skb);
             return ret;
         }
+        ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
+        if (ret) {
+            dev_kfree_skb_any(skb);
+            return ret;
+        }
+    } else {
+        ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
+        if (ret) {
+            dev_kfree_skb_any(skb);
+            return ret;
+        }
     }
     break;

@@ -453,7 +508,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
 }

 static void
-mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
+                         const u8 *peer, const u8 *bssid)
 {
     struct ieee80211_tdls_lnkie *lnkid;

@@ -467,8 +523,8 @@ mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
     memcpy(lnkid->resp_sta, peer, ETH_ALEN);
 }

-int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
-                 u8 *peer, u8 action_code, u8 dialog_token,
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
+                 u8 action_code, u8 dialog_token,
                  u16 status_code, const u8 *extra_ies,
                  size_t extra_ies_len)
 {
@@ -560,7 +616,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
 }

 static int
-mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+                                    const u8 *peer,
                                     u8 action_code, u8 dialog_token,
                                     u16 status_code, struct sk_buff *skb)
 {
@@ -638,10 +695,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
     return 0;
 }

-int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
-                   u8 *peer, u8 action_code, u8 dialog_token,
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
+                   u8 action_code, u8 dialog_token,
                    u16 status_code, const u8 *extra_ies,
                    size_t extra_ies_len)
 {
     struct sk_buff *skb;
     struct mwifiex_txinfo *tx_info;
@@ -848,7 +905,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
 }

 static int
-mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
 {
     struct mwifiex_sta_node *sta_ptr;
     struct mwifiex_ds_tdls_oper tdls_oper;
@@ -869,7 +926,7 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
 }

 static int
-mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
 {
     struct mwifiex_sta_node *sta_ptr;
     struct mwifiex_ds_tdls_oper tdls_oper;
@@ -896,7 +953,7 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
 }

 static int
-mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
 {
     struct mwifiex_sta_node *sta_ptr;
     struct mwifiex_ds_tdls_oper tdls_oper;
@@ -925,7 +982,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
 }

 static int
-mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
 {
     struct mwifiex_sta_node *sta_ptr;
     struct ieee80211_mcs_info mcs;
@@ -982,7 +1039,7 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
     return 0;
 }

-int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action)
 {
     switch (action) {
     case MWIFIEX_TDLS_ENABLE_LINK:
@@ -997,7 +1054,7 @@ int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
     return 0;
 }

-int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
 {
     struct mwifiex_sta_node *sta_ptr;

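Most of this tdls.c patch (and the util.c/wmm.c hunks below) constifies MAC-address parameters. Beyond documenting that the callee only reads the address, it lets callers pass const-qualified buffers, such as addresses taken straight out of received frames or static tables, without casts. A small sketch of the effect:

/* A callee that only reads the address takes 'const u8 *'; callers can
 * then pass read-only data without casting away qualifiers. */
typedef unsigned char u8;

static int link_status(const u8 *mac)
{
    return mac[0] & 1;              /* illustrative: group-address bit */
}

static const u8 bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static int check_bcast(void)
{
    return link_status(bcast);      /* fine once the parameter is const */
}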
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 9be6544bdded..32643555dd2a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -175,17 +175,19 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
     switch (GET_RXSTBC(cap_info)) {
     case MWIFIEX_RX_STBC1:
         /* HT_CAP 1X1 mode */
-        memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+        bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
         break;
     case MWIFIEX_RX_STBC12:    /* fall through */
     case MWIFIEX_RX_STBC123:
         /* HT_CAP 2X2 mode */
-        memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+        bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+        bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
         break;
     default:
         dev_warn(priv->adapter->dev,
                  "Unsupported RX-STBC, default to 2x2\n");
-        memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+        bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+        bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
         break;
     }
     priv->ap_11n_enabled = 1;
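The memset() calls replaced above wrote a raw byte count into a struct, which only worked because rx_mask happens to be the first member; the explicit stores name exactly which MCS masks are set. A sketch of the difference, with an illustrative struct layout:

#include <string.h>

struct mcs_info {
    unsigned char rx_mask[10];
    unsigned char tx_params;
};

/* Fragile: writes "1 byte at the start of the struct", silently relying
 * on rx_mask being the first member; breaks if the layout changes. */
static void set_1x1_implicit(struct mcs_info *mcs)
{
    memset(mcs, 0xff, 1);
}

/* Explicit: says what is meant - enable all MCS rates for stream 1. */
static void set_1x1_explicit(struct mcs_info *mcs)
{
    mcs->rx_mask[0] = 0xff;
}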
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edbe4aff00d8..a8ce8130cfae 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,9 +22,9 @@

 #define USB_VERSION    "1.0"

+static u8 user_rmmod;
 static struct mwifiex_if_ops usb_ops;
 static struct semaphore add_remove_card_sem;
-static struct usb_card_rec *usb_card;

 static struct usb_device_id mwifiex_usb_table[] = {
     /* 8797 */
@@ -532,28 +532,38 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
 static void mwifiex_usb_disconnect(struct usb_interface *intf)
 {
     struct usb_card_rec *card = usb_get_intfdata(intf);
+    struct mwifiex_adapter *adapter;

-    if (!card) {
-        pr_err("%s: card is NULL\n", __func__);
+    if (!card || !card->adapter) {
+        pr_err("%s: card or card->adapter is NULL\n", __func__);
         return;
     }

-    mwifiex_usb_free(card);
+    adapter = card->adapter;
+    if (!adapter->priv_num)
+        return;

-    if (card->adapter) {
-        struct mwifiex_adapter *adapter = card->adapter;
+    if (user_rmmod) {
+#ifdef CONFIG_PM
+        if (adapter->is_suspended)
+            mwifiex_usb_resume(intf);
+#endif

-        if (!adapter->priv_num)
-            return;
+        mwifiex_deauthenticate_all(adapter);

-        dev_dbg(adapter->dev, "%s: removing card\n", __func__);
-        mwifiex_remove_card(adapter, &add_remove_card_sem);
+        mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+                                                  MWIFIEX_BSS_ROLE_ANY),
+                                 MWIFIEX_FUNC_SHUTDOWN);
     }

+    mwifiex_usb_free(card);
+
+    dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+    mwifiex_remove_card(adapter, &add_remove_card_sem);
+
     usb_set_intfdata(intf, NULL);
     usb_put_dev(interface_to_usbdev(intf));
     kfree(card);
-    usb_card = NULL;

     return;
 }
@@ -565,6 +575,7 @@ static struct usb_driver mwifiex_usb_driver = {
     .id_table = mwifiex_usb_table,
     .suspend = mwifiex_usb_suspend,
     .resume = mwifiex_usb_resume,
+    .soft_unbind = 1,
 };

 static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -762,7 +773,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)

     card->adapter = adapter;
     adapter->dev = &card->udev->dev;
-    usb_card = card;

     switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
     case USB8897_PID_1:
@@ -1025,25 +1035,8 @@ static void mwifiex_usb_cleanup_module(void)
     if (!down_interruptible(&add_remove_card_sem))
         up(&add_remove_card_sem);

-    if (usb_card && usb_card->adapter) {
-        struct mwifiex_adapter *adapter = usb_card->adapter;
-
-        /* In case driver is removed when asynchronous FW downloading is
-         * in progress
-         */
-        wait_for_completion(&adapter->fw_load);
-
-#ifdef CONFIG_PM
-        if (adapter->is_suspended)
-            mwifiex_usb_resume(usb_card->intf);
-#endif
-
-        mwifiex_deauthenticate_all(adapter);
-
-        mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
-                                                  MWIFIEX_BSS_ROLE_ANY),
-                                 MWIFIEX_FUNC_SHUTDOWN);
-    }
+    /* set the flag as user is removing this module */
+    user_rmmod = 1;

     usb_deregister(&mwifiex_usb_driver);
 }
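The USB teardown above is restructured around the same user_rmmod pattern the SDIO interface uses: module exit only records that an unload is in progress, and the disconnect callback, which still runs on rmmod because of .soft_unbind = 1, performs the firmware shutdown while the device context is guaranteed to exist. A schematic sketch of the split, with hypothetical names:

struct my_adapter;

/* Exit path records intent only; disconnect does the device work. */
static int module_unloading;            /* set by the module exit path */

static void my_usb_disconnect(struct my_adapter *adapter)
{
    if (module_unloading) {
        /* orderly teardown: deauthenticate, tell firmware to halt */
    }
    /* then free transport resources and unregister the device */
    (void)adapter;
}

static void my_module_exit(void)
{
    module_unloading = 1;   /* touch no device state here */
    /* usb_deregister() follows and invokes ->disconnect; soft_unbind
     * keeps that callback running even on driver unload */
}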
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index c3824e37f3f2..6da5abf52e61 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -259,7 +259,7 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
  * NULL is returned if station entry is not found in associated STA list.
  */
 struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
 {
     struct mwifiex_sta_node *node;

@@ -280,7 +280,7 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
  * If received mac address is NULL, NULL is returned.
  */
 struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
 {
     struct mwifiex_sta_node *node;
     unsigned long flags;
@@ -332,7 +332,7 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
 }

 /* This function will delete a station entry from station list */
-void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
 {
     struct mwifiex_sta_node *node;
     unsigned long flags;
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 0a7cc742aed7..d3671d009f6c 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -92,7 +92,7 @@ mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
  * The function also initializes the list with the provided RA.
  */
 static struct mwifiex_ra_list_tbl *
-mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
+mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
 {
     struct mwifiex_ra_list_tbl *ra_list;

@@ -139,8 +139,7 @@ static u8 mwifiex_get_random_ba_threshold(void)
  * This function allocates and adds a RA list for all TIDs
  * with the given RA.
  */
-void
-mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
 {
     int i;
     struct mwifiex_ra_list_tbl *ra_list;
@@ -164,6 +163,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
     if (!mwifiex_queuing_ra_based(priv)) {
         if (mwifiex_get_tdls_link_status(priv, ra) ==
             TDLS_SETUP_COMPLETE) {
+            ra_list->tdls_link = true;
             ra_list->is_11n_enabled =
                 mwifiex_tdls_peer_11n_enabled(priv, ra);
         } else {
@@ -426,15 +426,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                 priv->tos_to_tid_inv[i];
     }

-    priv->aggr_prio_tbl[6].amsdu
-        = priv->aggr_prio_tbl[6].ampdu_ap
-        = priv->aggr_prio_tbl[6].ampdu_user
-        = BA_STREAM_NOT_ALLOWED;
-
-    priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
-        = priv->aggr_prio_tbl[7].ampdu_user
-        = BA_STREAM_NOT_ALLOWED;
-
     mwifiex_set_ba_params(priv);
     mwifiex_reset_11n_rx_seq_num(priv);

@@ -575,7 +566,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
  */
 static struct mwifiex_ra_list_tbl *
 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
-                            u8 *ra_addr)
+                            const u8 *ra_addr)
 {
     struct mwifiex_ra_list_tbl *ra_list;

@@ -596,7 +587,8 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
  * retrieved.
  */
 struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+                            const u8 *ra_addr)
 {
     struct mwifiex_ra_list_tbl *ra_list;

@@ -657,7 +649,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
         if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
             dev_dbg(adapter->dev,
                 "TDLS setup packet for %pM. Don't block\n", ra);
-        else
+        else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
             tdls_status = mwifiex_get_tdls_link_status(priv, ra);
     }

diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 83e42083ebff..eca56e371a57 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,7 +99,7 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)

 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                                  struct sk_buff *skb);
-void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
                               struct mwifiex_ra_list_tbl *ra, int tid);

@@ -123,7 +123,8 @@ void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
 int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
                                const struct host_cmd_ds_command *resp);
 struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+                            const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);

 #endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 49300d04efdf..e27e32851f1e 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -988,8 +988,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
  * tsc must be NULL or up to 8 bytes
  */
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                              int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
-                              u8 *tsc, size_t tsc_len)
+                              int set_tx, const u8 *key, const u8 *rsc,
+                              size_t rsc_len, const u8 *tsc, size_t tsc_len)
 {
     struct {
         __le16 idx;
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8f6831f4e328..466d1ede76f1 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -38,8 +38,8 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
 int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
 int __orinoco_hw_setup_enc(struct orinoco_private *priv);
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                              int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
-                              u8 *tsc, size_t tsc_len);
+                              int set_tx, const u8 *key, const u8 *rsc,
+                              size_t rsc_len, const u8 *tsc, size_t tsc_len);
 int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
 int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
                                     struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 3ac71339d040..c90939ced0e4 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1673,7 +1673,7 @@ static int ezusb_probe(struct usb_interface *interface,
         firmware.code = fw_entry->data;
     }
     if (firmware.size && firmware.code) {
-        if (ezusb_firmware_download(upriv, &firmware))
+        if (ezusb_firmware_download(upriv, &firmware) < 0)
             goto error;
     } else {
         err("No firmware to download");
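The one-line ezusb fix above suggests the download helper can return a positive, non-error value; only negative results are failures. The same rule applies to any write()-style API. A sketch of the correct check:

#include <errno.h>
#include <unistd.h>

/* write()-style call: returns a byte count on success, negative on
 * error, so only 'n < 0' is a failure. */
static int send_once(int fd, const void *buf, size_t len)
{
    ssize_t n = write(fd, buf, len);

    if (n < 0)          /* not 'if (n)': a positive count is success */
        return -errno;
    return (int)n;
}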
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index b7a867b50b94..6abdaf0aa052 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -52,9 +52,9 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
     priv->keys[index].seq_len = seq_len;

     if (key_len)
-        memcpy(priv->keys[index].key, key, key_len);
+        memcpy((void *)priv->keys[index].key, key, key_len);
     if (seq_len)
-        memcpy(priv->keys[index].seq, seq, seq_len);
+        memcpy((void *)priv->keys[index].seq, seq, seq_len);

     switch (alg) {
     case ORINOCO_ALG_TKIP:
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index eede90b63f84..7be3a4839640 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -669,7 +669,8 @@ static unsigned int p54_flush_count(struct p54_common *priv)
     return total;
 }

-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
+static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+                      u32 queues, bool drop)
 {
     struct p54_common *priv = dev->priv;
     unsigned int total, i;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index cbf0a589d32a..8330fa33e50b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -343,7 +343,7 @@ static void ray_detach(struct pcmcia_device *link)
     ray_release(link);

     local = netdev_priv(dev);
-    del_timer(&local->timer);
+    del_timer_sync(&local->timer);

     if (link->priv) {
         unregister_netdev(dev);
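del_timer_sync() is the right call on a teardown path: del_timer() only cancels a pending timer, while a handler already running on another CPU may still be touching the private data about to be freed. A kernel-style sketch of the safe ordering, assuming a non-rearming timer whose handler takes no lock held here:

#include <linux/timer.h>
#include <linux/netdevice.h>

static void safe_detach(struct net_device *dev, struct timer_list *t)
{
    /* del_timer() would only remove a pending timer; a handler already
     * running elsewhere could still use netdev_priv(dev). */
    del_timer_sync(t);      /* also waits for a running handler */
    unregister_netdev(dev);
    free_netdev(dev);       /* safe: no timer handler can run anymore */
}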
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 39d22a154341..d2a9a08210be 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -517,7 +517,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
                                  u8 key_index, bool unicast, bool multicast);

 static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
-                             u8 *mac, struct station_info *sinfo);
+                             const u8 *mac, struct station_info *sinfo);

 static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
                               int idx, u8 *mac, struct station_info *sinfo);
@@ -2490,7 +2490,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
 }

 static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
-                             u8 *mac, struct station_info *sinfo)
+                             const u8 *mac, struct station_info *sinfo)
 {
     struct rndis_wlan_private *priv = wiphy_priv(wiphy);
     struct usbnet *usbdev = priv->usbdev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 84164747ace0..54aaeb09debf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -656,6 +656,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
     case IEEE80211_AMPDU_TX_START:
         common->vif_info[ii].seq_start = seq_no;
         ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+        status = 0;
         break;

     case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 1b28cda6ca88..2eefbf159bc0 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1083,7 +1083,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
 {
     if (status) {
         rsi_hal_send_sta_notify_frame(common,
-                                      NL80211_IFTYPE_STATION,
+                                      RSI_IFTYPE_STATION,
                                       STA_CONNECTED,
                                       bssid,
                                       qos_enable,
@@ -1092,7 +1092,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
         rsi_send_auto_rate_request(common);
     } else {
         rsi_hal_send_sta_notify_frame(common,
-                                      NL80211_IFTYPE_STATION,
+                                      RSI_IFTYPE_STATION,
                                       STA_DISCONNECTED,
                                       bssid,
                                       qos_enable,
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index f2f70784d4ad..d3fbe33d2324 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -63,7 +63,7 @@ static inline int rsi_create_kthread(struct rsi_common *common,
                                      u8 *name)
 {
     init_completion(&thread->completion);
-    thread->task = kthread_run(func_ptr, common, name);
+    thread->task = kthread_run(func_ptr, common, "%s", name);
     if (IS_ERR(thread->task))
         return (int)PTR_ERR(thread->task);

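Passing the thread name through a constant "%s" closes a classic format-string hole: kthread_run()'s trailing arguments are a printf-style name format, so a name containing '%' would otherwise be interpreted as a conversion. A minimal kernel-style sketch of the safe form:

#include <linux/kthread.h>
#include <linux/err.h>

/* The constant "%s" format makes 'name' inert data, never a format. */
static struct task_struct *start_worker(int (*fn)(void *), void *arg,
                                        const char *name)
{
    struct task_struct *task = kthread_run(fn, arg, "%s", name);

    return IS_ERR(task) ? NULL : task;
}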
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index ac67c4ad63c2..225215a3b8bb 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -73,6 +73,7 @@
 #define RX_BA_INDICATION    1
 #define RSI_TBL_SZ          40
 #define MAX_RETRIES         8
+#define RSI_IFTYPE_STATION  0

 #define STD_RATE_MCS7       0x07
 #define STD_RATE_MCS6       0x06
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 41d4a8167dc3..c17fcf272728 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1005,10 +1005,9 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
                                       entry->skb->len + padding_len);

     /*
-     * Enable beaconing again.
+     * Restore beaconing state.
      */
-    rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-    rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+    rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);

     /*
      * Clean up beacon skb.
@@ -1039,13 +1038,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
 void rt2800_clear_beacon(struct queue_entry *entry)
 {
     struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-    u32 reg;
+    u32 orig_reg, reg;

     /*
      * Disable beaconing while we are reloading the beacon data,
      * otherwise we might be sending out invalid data.
      */
-    rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+    rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
+    reg = orig_reg;
     rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
     rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);

@@ -1055,10 +1055,9 @@ void rt2800_clear_beacon(struct queue_entry *entry)
     rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);

     /*
-     * Enabled beaconing again.
+     * Restore beaconing state.
      */
-    rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-    rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+    rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
 }
 EXPORT_SYMBOL_GPL(rt2800_clear_beacon);

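The rt2800, rt61pci and rt73usb beacon changes all apply one pattern: snapshot the register, modify a working copy, and write the snapshot back afterwards, so the final step restores the true prior state (which may have been "beaconing disabled") instead of unconditionally setting the enable bit. A sketch with stand-in accessors; the offsets, bit and helper names are illustrative, not the rt2x00 API:

#include <stdint.h>

/* Hypothetical accessors standing in for the rt2x00 register helpers. */
void reg_read(void *dev, int offset, uint32_t *val);
void reg_write(void *dev, int offset, uint32_t val);
void update_beacon_data(void *dev);

#define BCN_TIME_CFG    0x1114      /* illustrative offset */
#define BEACON_GEN      (1u << 20)  /* illustrative bit */

void reload_beacon(void *dev)
{
    uint32_t orig_reg, reg;

    reg_read(dev, BCN_TIME_CFG, &orig_reg);     /* snapshot */
    reg = orig_reg;
    reg &= ~BEACON_GEN;                         /* disable while updating */
    reg_write(dev, BCN_TIME_CFG, reg);

    update_beacon_data(dev);

    reg_write(dev, BCN_TIME_CFG, orig_reg);     /* restore, not re-enable */
}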
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e3b885d8f7db..010b76505243 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1448,7 +1448,8 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
                       struct ieee80211_vif *vif, u16 queue,
                       const struct ieee80211_tx_queue_params *params);
 void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     u32 queues, bool drop);
 int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
 int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
 void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index a87ee9b6585a..212ac4842c16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -749,7 +749,8 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);

-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     u32 queues, bool drop)
 {
     struct rt2x00_dev *rt2x00dev = hw->priv;
     struct data_queue *queue;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 10572452cc21..86c43d112a4b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -68,6 +68,12 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
         }
     }

+    /* If the port is powered down, we get a -EPROTO error, and this
+     * leads to a endless loop. So just say that the device is gone.
+     */
+    if (status == -EPROTO)
+        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+
     rt2x00_err(rt2x00dev,
                "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
                request, offset, status);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 24402984ee57..9048a9cbe52c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2031,13 +2031,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
 static void rt61pci_clear_beacon(struct queue_entry *entry)
 {
     struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-    u32 reg;
+    u32 orig_reg, reg;

     /*
      * Disable beaconing while we are reloading the beacon data,
      * otherwise we might be sending out invalid data.
      */
-    rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+    rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+    reg = orig_reg;
     rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
     rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);

@@ -2048,10 +2049,9 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
                               HW_BEACON_OFFSET(entry->entry_idx), 0);

     /*
-     * Enable beaconing again.
+     * Restore global beaconing state.
      */
-    rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
-    rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
+    rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
 }

 /*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a140170b1eb3..95724ff9c726 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1597,13 +1597,14 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
 {
     struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
     unsigned int beacon_base;
-    u32 reg;
+    u32 orig_reg, reg;

     /*
      * Disable beaconing while we are reloading the beacon data,
      * otherwise we might be sending out invalid data.
      */
-    rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+    rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+    reg = orig_reg;
     rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
     rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);

@@ -1614,10 +1615,9 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
     rt2x00usb_register_write(rt2x00dev, beacon_base, 0);

     /*
-     * Enable beaconing again.
+     * Restore beaconing state.
      */
-    rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
-    rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+    rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
 }

 static int rt73usb_get_tx_data_len(struct queue_entry *entry)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index 08b056db4a3b..21005bd8b43c 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,5 +1,5 @@
-rtl8180-objs        := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
+rtl818x_pci-objs    := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o

-obj-$(CONFIG_RTL8180)    += rtl8180.o
+obj-$(CONFIG_RTL8180)    += rtl818x_pci.o

 ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 98d8256f0377..2c1c02bafa10 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -284,6 +284,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
             rx_status.band = dev->conf.chandef.chan->band;
             rx_status.mactime = tsft;
             rx_status.flag |= RX_FLAG_MACTIME_START;
+            if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
+                rx_status.flag |= RX_FLAG_SHORTPRE;
             if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;

@@ -461,18 +463,23 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
                      RTL818X_TX_DESC_FLAG_NO_ENC;

     rc_flags = info->control.rates[0].flags;
+
+    /* HW will perform RTS-CTS when only RTS flags is set.
+     * HW will perform CTS-to-self when both RTS and CTS flags are set.
+     * RTS rate and RTS duration will be used also for CTS-to-self.
+     */
     if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
         tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
         tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+        rts_duration = ieee80211_rts_duration(dev, priv->vif,
+                                              skb->len, info);
     } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-        tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
+        tx_flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
         tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+        rts_duration = ieee80211_ctstoself_duration(dev, priv->vif,
+                                                    skb->len, info);
     }

-    if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
-        rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
-                                              info);
-
     if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
         unsigned int remainder;

@@ -683,9 +690,8 @@ static void rtl8180_int_enable(struct ieee80211_hw *dev)
     struct rtl8180_priv *priv = dev->priv;

     if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
-        rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
-              IMR_TBDER | IMR_THPDER |
-              IMR_THPDER | IMR_THPDOK |
+        rtl818x_iowrite32(priv, &priv->map->IMR,
+              IMR_TBDER | IMR_TBDOK |
               IMR_TVODER | IMR_TVODOK |
               IMR_TVIDER | IMR_TVIDOK |
               IMR_TBEDER | IMR_TBEDOK |
@@ -911,7 +917,10 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
         reg32 &= 0x00ffff00;
         reg32 |= 0xb8000054;
         rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
-    }
+    } else
+        /* stop unused queus (no dma alloc) */
+        rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+                 (1<<1) | (1<<2));

     priv->rf->init(dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 0ca17cda48fa..629ad8cfa17b 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -253,14 +253,21 @@ static void rtl8187_tx(struct ieee80211_hw *dev,
253 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24; 253 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
254 if (ieee80211_has_morefrags(tx_hdr->frame_control)) 254 if (ieee80211_has_morefrags(tx_hdr->frame_control))
255 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG; 255 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
256
257 /* HW will perform RTS-CTS when only RTS flags is set.
258 * HW will perform CTS-to-self when both RTS and CTS flags are set.
259 * RTS rate and RTS duration will be used also for CTS-to-self.
260 */
256 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 261 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
257 flags |= RTL818X_TX_DESC_FLAG_RTS; 262 flags |= RTL818X_TX_DESC_FLAG_RTS;
258 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 263 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
259 rts_dur = ieee80211_rts_duration(dev, priv->vif, 264 rts_dur = ieee80211_rts_duration(dev, priv->vif,
260 skb->len, info); 265 skb->len, info);
261 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 266 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
262 flags |= RTL818X_TX_DESC_FLAG_CTS; 267 flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
263 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 268 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
269 rts_dur = ieee80211_ctstoself_duration(dev, priv->vif,
270 skb->len, info);
264 } 271 }
265 272
266 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 273 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -381,6 +388,8 @@ static void rtl8187_rx_cb(struct urb *urb)
381 rx_status.freq = dev->conf.chandef.chan->center_freq; 388 rx_status.freq = dev->conf.chandef.chan->center_freq;
382 rx_status.band = dev->conf.chandef.chan->band; 389 rx_status.band = dev->conf.chandef.chan->band;
383 rx_status.flag |= RX_FLAG_MACTIME_START; 390 rx_status.flag |= RX_FLAG_MACTIME_START;
391 if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
392 rx_status.flag |= RX_FLAG_SHORTPRE;
384 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 393 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
385 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 394 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
386 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 395 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
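The rtl8180 and rtl8187 TX-path hunks above implement the same protection scheme: on this family, CTS-to-self reuses the RTS rate and duration fields, so the driver sets both descriptor flags and must compute the duration with the matching mac80211 helper. A minimal sketch of the selection logic, assuming only the exported mac80211 helpers (ieee80211_rts_duration() and ieee80211_ctstoself_duration() are standard mac80211 exports; variable names follow the rtl818x code):

	/* Sketch: pick the protection duration from rate-control flags.
	 * Both helpers return __le16, ready for the TX descriptor. */
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u32 rc_flags = info->control.rates[0].flags;
	__le16 prot_dur = 0;

	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
		prot_dur = ieee80211_rts_duration(dev, priv->vif,
						  skb->len, info);
	else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		prot_dur = ieee80211_ctstoself_duration(dev, priv->vif,
							skb->len, info);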
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 45ea4e1c4abe..7abef95d278b 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -334,9 +334,9 @@ struct rtl818x_csr {
334 * I don't like to introduce a ton of "reserved".. 334 * I don't like to introduce a ton of "reserved"..
335 * They are for RTL8187SE 335 * They are for RTL8187SE
336 */ 336 */
337#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr) 337#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + (addr))
338#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1)) 338#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + ((addr) >> 1))
339#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2)) 339#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + ((addr) >> 2))
340 340
341#define FEMR_SE REG_ADDR2(0x1D4) 341#define FEMR_SE REG_ADDR2(0x1D4)
342#define ARFR REG_ADDR2(0x1E0) 342#define ARFR REG_ADDR2(0x1E0)
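The REG_ADDRn() change is the usual macro-hygiene fix: the addr argument is now parenthesized so that expression arguments survive C operator precedence. An illustration with a hypothetical argument:

	/* Hypothetical: REG_ADDR2(REG_BASE | 2) without the added
	 * parentheses expands to
	 *	(__le16 __iomem *)priv->map + (REG_BASE | 2 >> 1)
	 * and since >> binds tighter than |, only the 2 is shifted,
	 * yielding the wrong offset. With ((addr) >> 1) the whole
	 * argument is shifted, as intended:
	 *	(__le16 __iomem *)priv->map + ((REG_BASE | 2) >> 1)
	 */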
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 4ec424f26672..b1ed6d0796f6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1387,7 +1387,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
1387 * before switch channel or power save, or tx buffer packet 1387 * before switch channel or power save, or tx buffer packet
1388 * maybe send after offchannel or rf sleep, this may cause 1388 * maybe send after offchannel or rf sleep, this may cause
1389 * dis-association by AP */ 1389 * dis-association by AP */
1390static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 1390static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1391 u32 queues, bool drop)
1391{ 1392{
1392 struct rtl_priv *rtlpriv = rtl_priv(hw); 1393 struct rtl_priv *rtlpriv = rtl_priv(hw);
1393 1394
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index 94cd9df98381..b14cf5a10f44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -2515,23 +2515,3 @@ void rtl88ee_suspend(struct ieee80211_hw *hw)
2515void rtl88ee_resume(struct ieee80211_hw *hw) 2515void rtl88ee_resume(struct ieee80211_hw *hw)
2516{ 2516{
2517} 2517}
2518
2519/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
2520void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
2521 bool allow_all_da, bool write_into_reg)
2522{
2523 struct rtl_priv *rtlpriv = rtl_priv(hw);
2524 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2525
2526 if (allow_all_da) /* Set BIT0 */
2527 rtlpci->receive_config |= RCR_AAP;
2528 else /* Clear BIT0 */
2529 rtlpci->receive_config &= ~RCR_AAP;
2530
2531 if (write_into_reg)
2532 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2533
2534 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2535 "receive_config = 0x%08X, write_into_reg =%d\n",
2536 rtlpci->receive_config, write_into_reg);
2537}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
index b4460a41bd01..1850fde881b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
@@ -61,8 +61,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw);
61void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw); 61void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw);
62void rtl88ee_suspend(struct ieee80211_hw *hw); 62void rtl88ee_suspend(struct ieee80211_hw *hw);
63void rtl88ee_resume(struct ieee80211_hw *hw); 63void rtl88ee_resume(struct ieee80211_hw *hw);
64void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
65 bool allow_all_da, bool write_into_reg);
66void rtl88ee_fw_clk_off_timer_callback(unsigned long data); 64void rtl88ee_fw_clk_off_timer_callback(unsigned long data);
67 65
68#endif 66#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 1b4101bf9974..842d69349a37 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,7 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
93 u8 tid; 93 u8 tid;
94 94
95 rtl8188ee_bt_reg_init(hw); 95 rtl8188ee_bt_reg_init(hw);
96 rtlpci->msi_support = true; 96 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
97 97
98 rtlpriv->dm.dm_initialgain_enable = 1; 98 rtlpriv->dm.dm_initialgain_enable = 1;
99 rtlpriv->dm.dm_flag = 0; 99 rtlpriv->dm.dm_flag = 0;
@@ -255,7 +255,6 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
255 .enable_hw_sec = rtl88ee_enable_hw_security_config, 255 .enable_hw_sec = rtl88ee_enable_hw_security_config,
256 .set_key = rtl88ee_set_key, 256 .set_key = rtl88ee_set_key,
257 .init_sw_leds = rtl88ee_init_sw_leds, 257 .init_sw_leds = rtl88ee_init_sw_leds,
258 .allow_all_destaddr = rtl88ee_allow_all_destaddr,
259 .get_bbreg = rtl88e_phy_query_bb_reg, 258 .get_bbreg = rtl88e_phy_query_bb_reg,
260 .set_bbreg = rtl88e_phy_set_bb_reg, 259 .set_bbreg = rtl88e_phy_set_bb_reg,
261 .get_rfreg = rtl88e_phy_query_rf_reg, 260 .get_rfreg = rtl88e_phy_query_rf_reg,
@@ -267,6 +266,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
267 .inactiveps = true, 266 .inactiveps = true,
268 .swctrl_lps = false, 267 .swctrl_lps = false,
269 .fwctrl_lps = true, 268 .fwctrl_lps = true,
269 .msi_support = false,
270 .debug = DBG_EMERG, 270 .debug = DBG_EMERG,
271}; 271};
272 272
@@ -383,10 +383,12 @@ module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
383module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444); 383module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
384module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444); 384module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
385module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444); 385module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
386module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444);
386MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); 387MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
387MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); 388MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
388MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); 389MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
389MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); 390MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
391MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupt mode (default 0)\n");
390MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 392MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
391 393
392static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 394static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
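The rtl8188ee hunks replace a hard-coded MSI choice with a module parameter carried in rtl_mod_params. A self-contained sketch of the same pattern, assuming nothing beyond the standard moduleparam API:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* read-only knob, visible under /sys/module/<name>/parameters/msi */
	static bool msi_support;	/* default: off */
	module_param_named(msi, msi_support, bool, 0444);
	MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupt mode (default 0)");

At probe time the driver then copies the knob into its per-device state (rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;) instead of forcing it on unconditionally.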
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 55adf043aef7..cdecb0fd4d8e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -2423,24 +2423,3 @@ void rtl92ce_suspend(struct ieee80211_hw *hw)
2423void rtl92ce_resume(struct ieee80211_hw *hw) 2423void rtl92ce_resume(struct ieee80211_hw *hw)
2424{ 2424{
2425} 2425}
2426
2427/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
2428void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
2429 bool allow_all_da, bool write_into_reg)
2430{
2431 struct rtl_priv *rtlpriv = rtl_priv(hw);
2432 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2433
2434 if (allow_all_da) {/* Set BIT0 */
2435 rtlpci->receive_config |= RCR_AAP;
2436 } else {/* Clear BIT0 */
2437 rtlpci->receive_config &= ~RCR_AAP;
2438 }
2439
2440 if (write_into_reg)
2441 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2442
2443 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2444 "receive_config=0x%08X, write_into_reg=%d\n",
2445 rtlpci->receive_config, write_into_reg);
2446}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 2d063b0c7760..5533070f266c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -76,7 +76,5 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw);
76void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw); 76void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw);
77void rtl92ce_suspend(struct ieee80211_hw *hw); 77void rtl92ce_suspend(struct ieee80211_hw *hw);
78void rtl92ce_resume(struct ieee80211_hw *hw); 78void rtl92ce_resume(struct ieee80211_hw *hw);
79void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
80 bool allow_all_da, bool write_into_reg);
81 79
82#endif 80#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b790320d2030..12f21f4073e8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -229,7 +229,6 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
229 .enable_hw_sec = rtl92ce_enable_hw_security_config, 229 .enable_hw_sec = rtl92ce_enable_hw_security_config,
230 .set_key = rtl92ce_set_key, 230 .set_key = rtl92ce_set_key,
231 .init_sw_leds = rtl92ce_init_sw_leds, 231 .init_sw_leds = rtl92ce_init_sw_leds,
232 .allow_all_destaddr = rtl92ce_allow_all_destaddr,
233 .get_bbreg = rtl92c_phy_query_bb_reg, 232 .get_bbreg = rtl92c_phy_query_bb_reg,
234 .set_bbreg = rtl92c_phy_set_bb_reg, 233 .set_bbreg = rtl92c_phy_set_bb_reg,
235 .set_rfreg = rtl92ce_phy_set_rf_reg, 234 .set_rfreg = rtl92ce_phy_set_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 07cb06da6729..a903c2671b4d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -511,7 +511,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
511 pr_info("MAC auto ON okay!\n"); 511 pr_info("MAC auto ON okay!\n");
512 break; 512 break;
513 } 513 }
514 if (pollingCount++ > 100) { 514 if (pollingCount++ > 1000) {
515 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, 515 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
516 				 "Failed polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); 516 				 "Failed polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
517 return -ENODEV; 517 return -ENODEV;
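This one-line change raises the retry budget of a bounded register poll from 100 to 1000 iterations before giving up with -ENODEV. The surrounding loop follows the usual bounded busy-wait pattern, sketched here with placeholder accessors (reg_read() and DONE_BIT are hypothetical stand-ins; the real code polls REG_APS_FSMCO for the APFM_ONMAC handshake):

	/* Sketch: bounded busy-wait on a hardware handshake bit. */
	static int wait_mac_auto_on(void)
	{
		unsigned int tries = 0;

		while (!(reg_read() & DONE_BIT)) {
			if (tries++ > 1000)	/* was 100 before the fix */
				return -ENODEV;	/* polling budget exhausted */
			udelay(10);		/* hypothetical back-off */
		}
		return 0;			/* handshake completed */
	}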
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index c61311084d7e..361435f8608a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -395,9 +395,6 @@ static struct usb_driver rtl8192cu_driver = {
395 /* .resume = rtl_usb_resume, */ 395 /* .resume = rtl_usb_resume, */
396 /* .reset_resume = rtl8192c_resume, */ 396 /* .reset_resume = rtl8192c_resume, */
397#endif /* CONFIG_PM */ 397#endif /* CONFIG_PM */
398#ifdef CONFIG_AUTOSUSPEND
399 .supports_autosuspend = 1,
400#endif
401 .disable_hub_initiated_lpm = 1, 398 .disable_hub_initiated_lpm = 1,
402}; 399};
403 400
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 9098558d916d..1c7101bcd790 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -2544,23 +2544,3 @@ void rtl92se_resume(struct ieee80211_hw *hw)
2544 pci_write_config_dword(rtlpci->pdev, 0x40, 2544 pci_write_config_dword(rtlpci->pdev, 0x40,
2545 val & 0xffff00ff); 2545 val & 0xffff00ff);
2546} 2546}
2547
2548/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
2549void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
2550 bool allow_all_da, bool write_into_reg)
2551{
2552 struct rtl_priv *rtlpriv = rtl_priv(hw);
2553 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2554
2555 if (allow_all_da) /* Set BIT0 */
2556 rtlpci->receive_config |= RCR_AAP;
2557 else /* Clear BIT0 */
2558 rtlpci->receive_config &= ~RCR_AAP;
2559
2560 if (write_into_reg)
2561 rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
2562
2563 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2564 "receive_config=0x%08X, write_into_reg=%d\n",
2565 rtlpci->receive_config, write_into_reg);
2566}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
index da48aa8cbe6f..4cacee10f31e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -74,7 +74,5 @@ void rtl92se_set_key(struct ieee80211_hw *hw,
74 u8 enc_algo, bool is_wepkey, bool clear_all); 74 u8 enc_algo, bool is_wepkey, bool clear_all);
75void rtl92se_suspend(struct ieee80211_hw *hw); 75void rtl92se_suspend(struct ieee80211_hw *hw);
76void rtl92se_resume(struct ieee80211_hw *hw); 76void rtl92se_resume(struct ieee80211_hw *hw);
77void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
78 bool allow_all_da, bool write_into_reg);
79 77
80#endif 78#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 2e8e6f8d2d51..1bff2a0f7600 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -290,7 +290,6 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
290 .enable_hw_sec = rtl92se_enable_hw_security_config, 290 .enable_hw_sec = rtl92se_enable_hw_security_config,
291 .set_key = rtl92se_set_key, 291 .set_key = rtl92se_set_key,
292 .init_sw_leds = rtl92se_init_sw_leds, 292 .init_sw_leds = rtl92se_init_sw_leds,
293 .allow_all_destaddr = rtl92se_allow_all_destaddr,
294 .get_bbreg = rtl92s_phy_query_bb_reg, 293 .get_bbreg = rtl92s_phy_query_bb_reg,
295 .set_bbreg = rtl92s_phy_set_bb_reg, 294 .set_bbreg = rtl92s_phy_set_bb_reg,
296 .get_rfreg = rtl92s_phy_query_rf_reg, 295 .get_rfreg = rtl92s_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 48fee1be78c2..5b4a714f3c8c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -32,7 +32,6 @@
32#include "dm.h" 32#include "dm.h"
33#include "fw.h" 33#include "fw.h"
34#include "../rtl8723com/fw_common.h" 34#include "../rtl8723com/fw_common.h"
35#include "../rtl8723com/fw_common.h"
36#include "phy.h" 35#include "phy.h"
37#include "reg.h" 36#include "reg.h"
38#include "hal_btc.h" 37#include "hal_btc.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 65c9e80e1f78..87f69166a7ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -2383,24 +2383,3 @@ void rtl8723ae_suspend(struct ieee80211_hw *hw)
2383void rtl8723ae_resume(struct ieee80211_hw *hw) 2383void rtl8723ae_resume(struct ieee80211_hw *hw)
2384{ 2384{
2385} 2385}
2386
2387/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
2388void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
2389 bool allow_all_da, bool write_into_reg)
2390{
2391 struct rtl_priv *rtlpriv = rtl_priv(hw);
2392 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2393
2394 if (allow_all_da) /* Set BIT0 */
2395 rtlpci->receive_config |= RCR_AAP;
2396 else /* Clear BIT0 */
2397 rtlpci->receive_config &= ~RCR_AAP;
2398
2399 if (write_into_reg)
2400 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2401
2402
2403 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2404 "receive_config=0x%08X, write_into_reg=%d\n",
2405 rtlpci->receive_config, write_into_reg);
2406}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
index 6fa24f79b1d7..d3bc39fb27a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
@@ -67,7 +67,5 @@ void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw);
67void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw); 67void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw);
68void rtl8723ae_suspend(struct ieee80211_hw *hw); 68void rtl8723ae_suspend(struct ieee80211_hw *hw);
69void rtl8723ae_resume(struct ieee80211_hw *hw); 69void rtl8723ae_resume(struct ieee80211_hw *hw);
70void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
71 bool allow_all_da, bool write_into_reg);
72 70
73#endif 71#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 1087a3bd07fa..73cba1eec8cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -238,7 +238,6 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
238 .enable_hw_sec = rtl8723ae_enable_hw_security_config, 238 .enable_hw_sec = rtl8723ae_enable_hw_security_config,
239 .set_key = rtl8723ae_set_key, 239 .set_key = rtl8723ae_set_key,
240 .init_sw_leds = rtl8723ae_init_sw_leds, 240 .init_sw_leds = rtl8723ae_init_sw_leds,
241 .allow_all_destaddr = rtl8723ae_allow_all_destaddr,
242 .get_bbreg = rtl8723_phy_query_bb_reg, 241 .get_bbreg = rtl8723_phy_query_bb_reg,
243 .set_bbreg = rtl8723_phy_set_bb_reg, 242 .set_bbreg = rtl8723_phy_set_bb_reg,
244 .get_rfreg = rtl8723ae_phy_query_rf_reg, 243 .get_rfreg = rtl8723ae_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index 0fdf0909321f..3d555495b453 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -2501,23 +2501,3 @@ void rtl8723be_suspend(struct ieee80211_hw *hw)
2501void rtl8723be_resume(struct ieee80211_hw *hw) 2501void rtl8723be_resume(struct ieee80211_hw *hw)
2502{ 2502{
2503} 2503}
2504
2505/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
2506void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
2507 bool write_into_reg)
2508{
2509 struct rtl_priv *rtlpriv = rtl_priv(hw);
2510 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2511
2512 if (allow_all_da) /* Set BIT0 */
2513 rtlpci->receive_config |= RCR_AAP;
2514 else /* Clear BIT0 */
2515 rtlpci->receive_config &= ~RCR_AAP;
2516
2517 if (write_into_reg)
2518 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2519
2520 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2521 "receive_config = 0x%08X, write_into_reg =%d\n",
2522 rtlpci->receive_config, write_into_reg);
2523}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
index b7449a9b57e4..64c7551af6b7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
@@ -59,6 +59,4 @@ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
59void rtl8723be_bt_hw_init(struct ieee80211_hw *hw); 59void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
60void rtl8723be_suspend(struct ieee80211_hw *hw); 60void rtl8723be_suspend(struct ieee80211_hw *hw);
61void rtl8723be_resume(struct ieee80211_hw *hw); 61void rtl8723be_resume(struct ieee80211_hw *hw);
62void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
63 bool write_into_reg);
64#endif 62#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index b4577ebc4bb0..ff12bf41644b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -92,7 +92,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
92 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 92 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
93 93
94 rtl8723be_bt_reg_init(hw); 94 rtl8723be_bt_reg_init(hw);
95 rtlpci->msi_support = true; 95 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
96 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); 96 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
97 97
98 rtlpriv->dm.dm_initialgain_enable = 1; 98 rtlpriv->dm.dm_initialgain_enable = 1;
@@ -253,6 +253,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
253 .inactiveps = true, 253 .inactiveps = true,
254 .swctrl_lps = false, 254 .swctrl_lps = false,
255 .fwctrl_lps = true, 255 .fwctrl_lps = true,
256 .msi_support = false,
256 .debug = DBG_EMERG, 257 .debug = DBG_EMERG,
257}; 258};
258 259
@@ -365,9 +366,11 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
365module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); 366module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
366module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); 367module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
367module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); 368module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
369module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
368MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n"); 370MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
369MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n"); 371MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
370MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n"); 372MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n");
373MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupt mode (default 0)\n");
371MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 374MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
372 375
373static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 376static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
index e0a0d8c8fed5..969eaea5eddd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -33,7 +33,6 @@
33#include "trx.h" 33#include "trx.h"
34#include "led.h" 34#include "led.h"
35#include "dm.h" 35#include "dm.h"
36#include "phy.h"
37 36
38static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) 37static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
39{ 38{
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 6965afdf572a..407a7936d364 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1960,8 +1960,6 @@ struct rtl_hal_ops {
1960 u32 regaddr, u32 bitmask); 1960 u32 regaddr, u32 bitmask);
1961 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1961 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1962 u32 regaddr, u32 bitmask, u32 data); 1962 u32 regaddr, u32 bitmask, u32 data);
1963 void (*allow_all_destaddr)(struct ieee80211_hw *hw,
1964 bool allow_all_da, bool write_into_reg);
1965 void (*linked_set_reg) (struct ieee80211_hw *hw); 1963 void (*linked_set_reg) (struct ieee80211_hw *hw);
1966 void (*chk_switch_dmdp) (struct ieee80211_hw *hw); 1964 void (*chk_switch_dmdp) (struct ieee80211_hw *hw);
1967 void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw); 1965 void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
@@ -2030,6 +2028,10 @@ struct rtl_mod_params {
2030 2028
2031 /* default: 1 = using linked fw power save */ 2029 /* default: 1 = using linked fw power save */
2032 bool fwctrl_lps; 2030 bool fwctrl_lps;
2031
 2032	/* default: 0 = not using MSI interrupt mode */
 2033	/* submodules should set their own default value */
2034 bool msi_support;
2033}; 2035};
2034 2036
2035struct rtl_hal_usbint_cfg { 2037struct rtl_hal_usbint_cfg {
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index 5a4ec56c83d0..5695628757ee 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -2,7 +2,6 @@
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/crc7.h>
6 5
7#include "wl1251.h" 6#include "wl1251.h"
8#include "reg.h" 7#include "reg.h"
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index bf1fa18b9786..ede31f048ef9 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -2,7 +2,6 @@
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/crc7.h>
6#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
7 6
8#include "wl1251.h" 7#include "wl1251.h"
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index db0105313745..c98630394a1a 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -124,11 +124,12 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
124 return ret; 124 return ret;
125 } 125 }
126 126
127 if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) { 127 if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
128 wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT"); 128 wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
129 129
130 /* indicate to the stack, that beacons have been lost */ 130 /* indicate to the stack, that beacons have been lost */
131 ieee80211_beacon_loss(wl->vif); 131 if (wl->vif && wl->vif->type == NL80211_IFTYPE_STATION)
132 ieee80211_beacon_loss(wl->vif);
132 } 133 }
133 134
134 if (vector & REGAINED_BSS_EVENT_ID) { 135 if (vector & REGAINED_BSS_EVENT_ID) {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 757e25784a8a..4e782f18ae34 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -550,6 +550,34 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
550 mutex_unlock(&wl->mutex); 550 mutex_unlock(&wl->mutex);
551} 551}
552 552
553static int wl1251_build_null_data(struct wl1251 *wl)
554{
555 struct sk_buff *skb = NULL;
556 int size;
557 void *ptr;
558 int ret = -ENOMEM;
559
560 if (wl->bss_type == BSS_TYPE_IBSS) {
561 size = sizeof(struct wl12xx_null_data_template);
562 ptr = NULL;
563 } else {
564 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
565 if (!skb)
566 goto out;
567 size = skb->len;
568 ptr = skb->data;
569 }
570
571 ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, ptr, size);
572
573out:
574 dev_kfree_skb(skb);
575 if (ret)
 576		wl1251_warning("cmd build null data failed: %d", ret);
577
578 return ret;
579}
580
553static int wl1251_build_qos_null_data(struct wl1251 *wl) 581static int wl1251_build_qos_null_data(struct wl1251 *wl)
554{ 582{
555 struct ieee80211_qos_hdr template; 583 struct ieee80211_qos_hdr template;
@@ -687,16 +715,6 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
687 wl->power_level = conf->power_level; 715 wl->power_level = conf->power_level;
688 } 716 }
689 717
690 /*
691 * Tell stack that connection is lost because hw encryption isn't
692 * supported in monitor mode.
693 * This requires temporary enabling of the hw connection monitor flag
694 */
695 if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
696 wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
697 ieee80211_connection_loss(wl->vif);
698 }
699
700out_sleep: 718out_sleep:
701 wl1251_ps_elp_sleep(wl); 719 wl1251_ps_elp_sleep(wl);
702 720
@@ -1103,24 +1121,19 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1103 wl->rssi_thold = bss_conf->cqm_rssi_thold; 1121 wl->rssi_thold = bss_conf->cqm_rssi_thold;
1104 } 1122 }
1105 1123
1106 if (changed & BSS_CHANGED_BSSID) { 1124 if ((changed & BSS_CHANGED_BSSID) &&
1125 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1107 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 1126 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1108 1127
1109 skb = ieee80211_nullfunc_get(wl->hw, wl->vif); 1128 if (!is_zero_ether_addr(wl->bssid)) {
1110 if (!skb) 1129 ret = wl1251_build_null_data(wl);
1111 goto out_sleep; 1130 if (ret < 0)
1112 1131 goto out_sleep;
1113 ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
1114 skb->data, skb->len);
1115 dev_kfree_skb(skb);
1116 if (ret < 0)
1117 goto out_sleep;
1118 1132
1119 ret = wl1251_build_qos_null_data(wl); 1133 ret = wl1251_build_qos_null_data(wl);
1120 if (ret < 0) 1134 if (ret < 0)
1121 goto out; 1135 goto out_sleep;
1122 1136
1123 if (wl->bss_type != BSS_TYPE_IBSS) {
1124 ret = wl1251_join(wl, wl->bss_type, wl->channel, 1137 ret = wl1251_join(wl, wl->bss_type, wl->channel,
1125 wl->beacon_int, wl->dtim_period); 1138 wl->beacon_int, wl->dtim_period);
1126 if (ret < 0) 1139 if (ret < 0)
@@ -1129,9 +1142,6 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1129 } 1142 }
1130 1143
1131 if (changed & BSS_CHANGED_ASSOC) { 1144 if (changed & BSS_CHANGED_ASSOC) {
1132 /* Disable temporary enabled hw connection monitor flag */
1133 wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
1134
1135 if (bss_conf->assoc) { 1145 if (bss_conf->assoc) {
1136 wl->beacon_int = bss_conf->beacon_int; 1146 wl->beacon_int = bss_conf->beacon_int;
1137 1147
@@ -1216,8 +1226,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1216 if (ret < 0) 1226 if (ret < 0)
1217 goto out_sleep; 1227 goto out_sleep;
1218 1228
1219 ret = wl1251_join(wl, wl->bss_type, wl->beacon_int, 1229 ret = wl1251_join(wl, wl->bss_type, wl->channel,
1220 wl->channel, wl->dtim_period); 1230 wl->beacon_int, wl->dtim_period);
1221 1231
1222 if (ret < 0) 1232 if (ret < 0)
1223 goto out_sleep; 1233 goto out_sleep;
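The last hunk fixes swapped arguments: channel and beacon_int had been passed in each other's positions. Assuming the wl1251 prototype (u8 channel before u32 beacon_interval), implicit integer conversions let either order compile silently, which is how the bug survived:

	/* Assumed prototype (wl1251):
	 * int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
	 *		   u32 beacon_interval, u8 dtim_period);
	 *
	 * Buggy call -- channel and beacon interval swapped, no warning:
	 *	wl1251_join(wl, wl->bss_type, wl->beacon_int,
	 *		    wl->channel, wl->dtim_period);
	 * Fixed call:
	 *	wl1251_join(wl, wl->bss_type, wl->channel,
	 *		    wl->beacon_int, wl->dtim_period);
	 */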
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index b06d36d99362..a0aa8fa72392 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -23,6 +23,7 @@
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/swab.h>
26#include <linux/crc7.h> 27#include <linux/crc7.h>
27#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
28#include <linux/wl12xx.h> 29#include <linux/wl12xx.h>
@@ -83,47 +84,44 @@ static void wl1251_spi_reset(struct wl1251 *wl)
83 84
84static void wl1251_spi_wake(struct wl1251 *wl) 85static void wl1251_spi_wake(struct wl1251 *wl)
85{ 86{
86 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
87 struct spi_transfer t; 87 struct spi_transfer t;
88 struct spi_message m; 88 struct spi_message m;
89 u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
89 90
90 cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
91 if (!cmd) { 91 if (!cmd) {
92 wl1251_error("could not allocate cmd for spi init"); 92 wl1251_error("could not allocate cmd for spi init");
93 return; 93 return;
94 } 94 }
95 95
96 memset(crc, 0, sizeof(crc));
97 memset(&t, 0, sizeof(t)); 96 memset(&t, 0, sizeof(t));
98 spi_message_init(&m); 97 spi_message_init(&m);
99 98
100 /* Set WSPI_INIT_COMMAND 99 /* Set WSPI_INIT_COMMAND
 101 	 * the data is sent from the MSB to LSB 100 	 * the data is sent from the MSB to LSB
102 */ 101 */
103 cmd[2] = 0xff; 102 cmd[0] = 0xff;
104 cmd[3] = 0xff; 103 cmd[1] = 0xff;
105 cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; 104 cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
106 cmd[0] = 0; 105 cmd[3] = 0;
107 cmd[7] = 0; 106 cmd[4] = 0;
108 cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3; 107 cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
109 cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; 108 cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
109
110 cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
111 | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
110 112
111 if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) 113 if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
112 cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; 114 cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
113 else 115 else
114 cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY; 116 cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
115
116 cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
117 | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
118
119 crc[0] = cmd[1];
120 crc[1] = cmd[0];
121 crc[2] = cmd[7];
122 crc[3] = cmd[6];
123 crc[4] = cmd[5];
124 117
125 cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1; 118 cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
126 cmd[4] |= WSPI_INIT_CMD_END; 119 /*
120 * The above is the logical order; it must actually be stored
121 * in the buffer byte-swapped.
122 */
123 __swab32s((u32 *)cmd);
124 __swab32s((u32 *)cmd+1);
127 125
128 t.tx_buf = cmd; 126 t.tx_buf = cmd;
129 t.len = WSPI_INIT_CMD_LEN; 127 t.len = WSPI_INIT_CMD_LEN;
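The wl1251 SPI-wake fix (repeated for wlcore below) rebuilds the WSPI init command in its logical wire order: bytes 0..6 are filled MSB-first, crc7_be() from <linux/crc7.h> then runs directly over the five CRC-covered bytes, and each 32-bit word is finally byte-swapped for the controller. crc7_be() returns the 7-bit CRC already left-aligned in the byte, so the old << 1 shift disappears and bit 0 stays free for WSPI_INIT_CMD_END. A condensed sketch using the driver's constants:

	u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);	/* 8 bytes */
	if (!cmd)
		return;

	/* ... fill cmd[0..6] in logical MSB-first order, as in the hunk ... */

	cmd[7] = crc7_be(0, cmd + 2, WSPI_INIT_CMD_CRC_LEN) |
		 WSPI_INIT_CMD_END;

	/* the bus expects each 32-bit word stored byte-swapped */
	__swab32s((u32 *)cmd);		/* from <linux/swab.h> */
	__swab32s((u32 *)cmd + 1);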
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index f7381dd69009..0f2cfb0d2a9e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -57,7 +57,7 @@ static const struct file_operations name## _ops = { \
57 wl, &name## _ops); \ 57 wl, &name## _ops); \
58 if (!entry || IS_ERR(entry)) \ 58 if (!entry || IS_ERR(entry)) \
59 goto err; \ 59 goto err; \
60 } while (0); 60 } while (0)
61 61
62 62
63#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \ 63#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
@@ -66,7 +66,7 @@ static const struct file_operations name## _ops = { \
66 wl, &prefix## _## name## _ops); \ 66 wl, &prefix## _## name## _ops); \
67 if (!entry || IS_ERR(entry)) \ 67 if (!entry || IS_ERR(entry)) \
68 goto err; \ 68 goto err; \
69 } while (0); 69 } while (0)
70 70
71#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \ 71#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
72static ssize_t sub## _ ##name## _read(struct file *file, \ 72static ssize_t sub## _ ##name## _read(struct file *file, \
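Dropping the semicolon after while (0) matters because these macros are meant to behave like single statements. With the stray semicolon, the expansion leaves an empty statement behind and any if/else wrapped around the macro stops compiling. A classic illustration (FOO(), bar(), baz() are hypothetical):

	#define FOO() do { bar(); } while (0);	/* buggy trailing ';' */

	if (cond)
		FOO();		/* expands to: do { bar(); } while (0);; */
	else			/* the do-while's ';' closes the 'if' body and
				 * the extra ';' is an empty statement, so this
				 * 'else' has no matching 'if' -> compile error */
		baz();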
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e71eae353368..3d6028e62750 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1416,7 +1416,7 @@ void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1416 1416
1417int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter, 1417int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1418 u16 offset, u8 flags, 1418 u16 offset, u8 flags,
1419 u8 *pattern, u8 len) 1419 const u8 *pattern, u8 len)
1420{ 1420{
1421 struct wl12xx_rx_filter_field *field; 1421 struct wl12xx_rx_filter_field *field;
1422 1422
@@ -5184,7 +5184,8 @@ out:
5184 mutex_unlock(&wl->mutex); 5184 mutex_unlock(&wl->mutex);
5185} 5185}
5186 5186
5187static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) 5187static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5188 u32 queues, bool drop)
5188{ 5189{
5189 struct wl1271 *wl = hw->priv; 5190 struct wl1271 *wl = hw->priv;
5190 5191
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 29ef2492951f..d3dd7bfdf3f1 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
217static int wl1271_probe(struct sdio_func *func, 217static int wl1271_probe(struct sdio_func *func,
218 const struct sdio_device_id *id) 218 const struct sdio_device_id *id)
219{ 219{
220 struct wlcore_platdev_data *pdev_data; 220 struct wlcore_platdev_data pdev_data;
221 struct wl12xx_sdio_glue *glue; 221 struct wl12xx_sdio_glue *glue;
222 struct resource res[1]; 222 struct resource res[1];
223 mmc_pm_flag_t mmcflags; 223 mmc_pm_flag_t mmcflags;
@@ -228,16 +228,13 @@ static int wl1271_probe(struct sdio_func *func,
228 if (func->num != 0x02) 228 if (func->num != 0x02)
229 return -ENODEV; 229 return -ENODEV;
230 230
231 pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL); 231 memset(&pdev_data, 0x00, sizeof(pdev_data));
232 if (!pdev_data) 232 pdev_data.if_ops = &sdio_ops;
233 goto out;
234
235 pdev_data->if_ops = &sdio_ops;
236 233
237 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 234 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
238 if (!glue) { 235 if (!glue) {
239 dev_err(&func->dev, "can't allocate glue\n"); 236 dev_err(&func->dev, "can't allocate glue\n");
240 goto out_free_pdev_data; 237 goto out;
241 } 238 }
242 239
243 glue->dev = &func->dev; 240 glue->dev = &func->dev;
@@ -248,9 +245,9 @@ static int wl1271_probe(struct sdio_func *func,
248 /* Use block mode for transferring over one block size of data */ 245 /* Use block mode for transferring over one block size of data */
249 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; 246 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
250 247
251 pdev_data->pdata = wl12xx_get_platform_data(); 248 pdev_data.pdata = wl12xx_get_platform_data();
252 if (IS_ERR(pdev_data->pdata)) { 249 if (IS_ERR(pdev_data.pdata)) {
253 ret = PTR_ERR(pdev_data->pdata); 250 ret = PTR_ERR(pdev_data.pdata);
254 dev_err(glue->dev, "missing wlan platform data: %d\n", ret); 251 dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
255 goto out_free_glue; 252 goto out_free_glue;
256 } 253 }
@@ -260,7 +257,7 @@ static int wl1271_probe(struct sdio_func *func,
260 dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); 257 dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
261 258
262 if (mmcflags & MMC_PM_KEEP_POWER) 259 if (mmcflags & MMC_PM_KEEP_POWER)
263 pdev_data->pdata->pwr_in_suspend = true; 260 pdev_data.pdata->pwr_in_suspend = true;
264 261
265 sdio_set_drvdata(func, glue); 262 sdio_set_drvdata(func, glue);
266 263
@@ -289,7 +286,7 @@ static int wl1271_probe(struct sdio_func *func,
289 286
290 memset(res, 0x00, sizeof(res)); 287 memset(res, 0x00, sizeof(res));
291 288
292 res[0].start = pdev_data->pdata->irq; 289 res[0].start = pdev_data.pdata->irq;
293 res[0].flags = IORESOURCE_IRQ; 290 res[0].flags = IORESOURCE_IRQ;
294 res[0].name = "irq"; 291 res[0].name = "irq";
295 292
@@ -299,8 +296,8 @@ static int wl1271_probe(struct sdio_func *func,
299 goto out_dev_put; 296 goto out_dev_put;
300 } 297 }
301 298
302 ret = platform_device_add_data(glue->core, pdev_data, 299 ret = platform_device_add_data(glue->core, &pdev_data,
303 sizeof(*pdev_data)); 300 sizeof(pdev_data));
304 if (ret) { 301 if (ret) {
305 dev_err(glue->dev, "can't add platform data\n"); 302 dev_err(glue->dev, "can't add platform data\n");
306 goto out_dev_put; 303 goto out_dev_put;
@@ -319,9 +316,6 @@ out_dev_put:
319out_free_glue: 316out_free_glue:
320 kfree(glue); 317 kfree(glue);
321 318
322out_free_pdev_data:
323 kfree(pdev_data);
324
325out: 319out:
326 return ret; 320 return ret;
327} 321}
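The kzalloc()/kfree() of pdev_data can go because platform_device_add_data() copies the buffer it is handed into the platform device (it kmemdup()s into pdev->dev.platform_data), so a stack-local struct that dies when probe returns is safe. A sketch with a placeholder payload type:

	/* my_pdata is hypothetical; the call itself duplicates the data. */
	struct my_pdata pdata;
	int ret;

	memset(&pdata, 0, sizeof(pdata));
	pdata.if_ops = &sdio_ops;		/* as in the driver */

	ret = platform_device_add_data(glue->core, &pdata, sizeof(pdata));
	if (ret)
		goto out_dev_put;		/* nothing of ours to free */

The same conversion is applied to the wlcore SPI glue further down.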
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index dbe826dd7c23..392c882b28f0 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -24,11 +24,12 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/slab.h>
28#include <linux/swab.h>
27#include <linux/crc7.h> 29#include <linux/crc7.h>
28#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
29#include <linux/wl12xx.h> 31#include <linux/wl12xx.h>
30#include <linux/platform_device.h> 32#include <linux/platform_device.h>
31#include <linux/slab.h>
32 33
33#include "wlcore.h" 34#include "wlcore.h"
34#include "wl12xx_80211.h" 35#include "wl12xx_80211.h"
@@ -110,18 +111,16 @@ static void wl12xx_spi_reset(struct device *child)
110static void wl12xx_spi_init(struct device *child) 111static void wl12xx_spi_init(struct device *child)
111{ 112{
112 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); 113 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
113 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
114 struct spi_transfer t; 114 struct spi_transfer t;
115 struct spi_message m; 115 struct spi_message m;
116 u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
116 117
117 cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
118 if (!cmd) { 118 if (!cmd) {
119 dev_err(child->parent, 119 dev_err(child->parent,
120 "could not allocate cmd for spi init\n"); 120 "could not allocate cmd for spi init\n");
121 return; 121 return;
122 } 122 }
123 123
124 memset(crc, 0, sizeof(crc));
125 memset(&t, 0, sizeof(t)); 124 memset(&t, 0, sizeof(t));
126 spi_message_init(&m); 125 spi_message_init(&m);
127 126
@@ -129,30 +128,29 @@ static void wl12xx_spi_init(struct device *child)
129 * Set WSPI_INIT_COMMAND 128 * Set WSPI_INIT_COMMAND
 130 	 * the data is sent from the MSB to LSB 129 	 * the data is sent from the MSB to LSB
131 */ 130 */
132 cmd[2] = 0xff; 131 cmd[0] = 0xff;
133 cmd[3] = 0xff; 132 cmd[1] = 0xff;
134 cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; 133 cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
135 cmd[0] = 0; 134 cmd[3] = 0;
136 cmd[7] = 0; 135 cmd[4] = 0;
137 cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3; 136 cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
138 cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; 137 cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
138
139 cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
140 | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
139 141
140 if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) 142 if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
141 cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; 143 cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
142 else 144 else
143 cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY; 145 cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
144
145 cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
146 | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
147
148 crc[0] = cmd[1];
149 crc[1] = cmd[0];
150 crc[2] = cmd[7];
151 crc[3] = cmd[6];
152 crc[4] = cmd[5];
153 146
154 cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1; 147 cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
155 cmd[4] |= WSPI_INIT_CMD_END; 148 /*
149 * The above is the logical order; it must actually be stored
150 * in the buffer byte-swapped.
151 */
152 __swab32s((u32 *)cmd);
153 __swab32s((u32 *)cmd+1);
156 154
157 t.tx_buf = cmd; 155 t.tx_buf = cmd;
158 t.len = WSPI_INIT_CMD_LEN; 156 t.len = WSPI_INIT_CMD_LEN;
@@ -327,27 +325,25 @@ static struct wl1271_if_operations spi_ops = {
327static int wl1271_probe(struct spi_device *spi) 325static int wl1271_probe(struct spi_device *spi)
328{ 326{
329 struct wl12xx_spi_glue *glue; 327 struct wl12xx_spi_glue *glue;
330 struct wlcore_platdev_data *pdev_data; 328 struct wlcore_platdev_data pdev_data;
331 struct resource res[1]; 329 struct resource res[1];
332 int ret = -ENOMEM; 330 int ret = -ENOMEM;
333 331
334 pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL); 332 memset(&pdev_data, 0x00, sizeof(pdev_data));
335 if (!pdev_data)
336 goto out;
337 333
338 pdev_data->pdata = dev_get_platdata(&spi->dev); 334 pdev_data.pdata = dev_get_platdata(&spi->dev);
339 if (!pdev_data->pdata) { 335 if (!pdev_data.pdata) {
340 dev_err(&spi->dev, "no platform data\n"); 336 dev_err(&spi->dev, "no platform data\n");
341 ret = -ENODEV; 337 ret = -ENODEV;
342 goto out_free_pdev_data; 338 goto out;
343 } 339 }
344 340
345 pdev_data->if_ops = &spi_ops; 341 pdev_data.if_ops = &spi_ops;
346 342
347 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 343 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
348 if (!glue) { 344 if (!glue) {
349 dev_err(&spi->dev, "can't allocate glue\n"); 345 dev_err(&spi->dev, "can't allocate glue\n");
350 goto out_free_pdev_data; 346 goto out;
351 } 347 }
352 348
353 glue->dev = &spi->dev; 349 glue->dev = &spi->dev;
@@ -385,8 +381,8 @@ static int wl1271_probe(struct spi_device *spi)
385 goto out_dev_put; 381 goto out_dev_put;
386 } 382 }
387 383
388 ret = platform_device_add_data(glue->core, pdev_data, 384 ret = platform_device_add_data(glue->core, &pdev_data,
389 sizeof(*pdev_data)); 385 sizeof(pdev_data));
390 if (ret) { 386 if (ret) {
391 dev_err(glue->dev, "can't add platform data\n"); 387 dev_err(glue->dev, "can't add platform data\n");
392 goto out_dev_put; 388 goto out_dev_put;
@@ -406,9 +402,6 @@ out_dev_put:
406out_free_glue: 402out_free_glue:
407 kfree(glue); 403 kfree(glue);
408 404
409out_free_pdev_data:
410 kfree(pdev_data);
411
412out: 405out:
413 return ret; 406 return ret;
414} 407}
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 756e890bc5ee..c2c34a84ff3d 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -512,8 +512,8 @@ int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
512void wl12xx_queue_recovery_work(struct wl1271 *wl); 512void wl12xx_queue_recovery_work(struct wl1271 *wl);
513size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); 513size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
514int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter, 514int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
515 u16 offset, u8 flags, 515 u16 offset, u8 flags,
516 u8 *pattern, u8 len); 516 const u8 *pattern, u8 len);
517void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter); 517void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter);
518struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void); 518struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void);
519int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter); 519int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0d4a285cbd7e..4dd7c4a1923b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
99 */ 99 */
100#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN 100#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
101 101
102struct xenvif { 102/* Queue name is interface name with "-qNNN" appended */
103 /* Unique identifier for this interface. */ 103#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
104 domid_t domid;
105 unsigned int handle;
106 104
107 /* Is this interface disabled? True when backend discovers 105/* IRQ name is queue name with "-tx" or "-rx" appended */
108 * frontend is rogue. 106#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
107
108struct xenvif;
109
110struct xenvif_stats {
111 /* Stats fields to be updated per-queue.
112 * A subset of struct net_device_stats that contains only the
113 * fields that are updated in netback.c for each queue.
109 */ 114 */
110 bool disabled; 115 unsigned int rx_bytes;
116 unsigned int rx_packets;
117 unsigned int tx_bytes;
118 unsigned int tx_packets;
119
120 /* Additional stats used by xenvif */
121 unsigned long rx_gso_checksum_fixup;
122 unsigned long tx_zerocopy_sent;
123 unsigned long tx_zerocopy_success;
124 unsigned long tx_zerocopy_fail;
125 unsigned long tx_frag_overflow;
126};
127
128struct xenvif_queue { /* Per-queue data for xenvif */
129 unsigned int id; /* Queue ID, 0-based */
130 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
131 struct xenvif *vif; /* Parent VIF */
111 132
112 /* Use NAPI for guest TX */ 133 /* Use NAPI for guest TX */
113 struct napi_struct napi; 134 struct napi_struct napi;
114 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ 135 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
115 unsigned int tx_irq; 136 unsigned int tx_irq;
116 /* Only used when feature-split-event-channels = 1 */ 137 /* Only used when feature-split-event-channels = 1 */
117 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ 138 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
118 struct xen_netif_tx_back_ring tx; 139 struct xen_netif_tx_back_ring tx;
119 struct sk_buff_head tx_queue; 140 struct sk_buff_head tx_queue;
120 struct page *mmap_pages[MAX_PENDING_REQS]; 141 struct page *mmap_pages[MAX_PENDING_REQS];
@@ -150,7 +171,7 @@ struct xenvif {
150 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ 171 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
151 unsigned int rx_irq; 172 unsigned int rx_irq;
152 /* Only used when feature-split-event-channels = 1 */ 173 /* Only used when feature-split-event-channels = 1 */
153 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ 174 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
154 struct xen_netif_rx_back_ring rx; 175 struct xen_netif_rx_back_ring rx;
155 struct sk_buff_head rx_queue; 176 struct sk_buff_head rx_queue;
156 RING_IDX rx_last_skb_slots; 177 RING_IDX rx_last_skb_slots;
@@ -158,14 +179,29 @@ struct xenvif {
158 179
159 struct timer_list wake_queue; 180 struct timer_list wake_queue;
160 181
 161 	/* This array is allocated separately as it is large */ 182 	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
162 struct gnttab_copy *grant_copy_op;
163 183
164 /* We create one meta structure per ring request we consume, so 184 /* We create one meta structure per ring request we consume, so
165 * the maximum number is the same as the ring size. 185 * the maximum number is the same as the ring size.
166 */ 186 */
167 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; 187 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
168 188
189 /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
190 unsigned long credit_bytes;
191 unsigned long credit_usec;
192 unsigned long remaining_credit;
193 struct timer_list credit_timeout;
194 u64 credit_window_start;
195
196 /* Statistics */
197 struct xenvif_stats stats;
198};
199
200struct xenvif {
201 /* Unique identifier for this interface. */
202 domid_t domid;
203 unsigned int handle;
204
169 u8 fe_dev_addr[6]; 205 u8 fe_dev_addr[6];
170 206
171 /* Frontend feature information. */ 207 /* Frontend feature information. */
@@ -179,19 +215,13 @@ struct xenvif {
179 /* Internal feature information. */ 215 /* Internal feature information. */
180 u8 can_queue:1; /* can queue packets for receiver? */ 216 u8 can_queue:1; /* can queue packets for receiver? */
181 217
182 /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ 218 /* Is this interface disabled? True when backend discovers
183 unsigned long credit_bytes; 219 * frontend is rogue.
184 unsigned long credit_usec; 220 */
185 unsigned long remaining_credit; 221 bool disabled;
186 struct timer_list credit_timeout;
187 u64 credit_window_start;
188 222
189 /* Statistics */ 223 /* Queues */
190 unsigned long rx_gso_checksum_fixup; 224 struct xenvif_queue *queues;
191 unsigned long tx_zerocopy_sent;
192 unsigned long tx_zerocopy_success;
193 unsigned long tx_zerocopy_fail;
194 unsigned long tx_frag_overflow;
195 225
196 /* Miscellaneous private stuff. */ 226 /* Miscellaneous private stuff. */
197 struct net_device *dev; 227 struct net_device *dev;
@@ -206,7 +236,10 @@ struct xenvif *xenvif_alloc(struct device *parent,
206 domid_t domid, 236 domid_t domid,
207 unsigned int handle); 237 unsigned int handle);
208 238
209int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 239int xenvif_init_queue(struct xenvif_queue *queue);
240void xenvif_deinit_queue(struct xenvif_queue *queue);
241
242int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
210 unsigned long rx_ring_ref, unsigned int tx_evtchn, 243 unsigned long rx_ring_ref, unsigned int tx_evtchn,
211 unsigned int rx_evtchn); 244 unsigned int rx_evtchn);
212void xenvif_disconnect(struct xenvif *vif); 245void xenvif_disconnect(struct xenvif *vif);
@@ -217,44 +250,47 @@ void xenvif_xenbus_fini(void);
217 250
218int xenvif_schedulable(struct xenvif *vif); 251int xenvif_schedulable(struct xenvif *vif);
219 252
220int xenvif_must_stop_queue(struct xenvif *vif); 253int xenvif_must_stop_queue(struct xenvif_queue *queue);
254
255int xenvif_queue_stopped(struct xenvif_queue *queue);
256void xenvif_wake_queue(struct xenvif_queue *queue);
221 257
222/* (Un)Map communication rings. */ 258/* (Un)Map communication rings. */
223void xenvif_unmap_frontend_rings(struct xenvif *vif); 259void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
224int xenvif_map_frontend_rings(struct xenvif *vif, 260int xenvif_map_frontend_rings(struct xenvif_queue *queue,
225 grant_ref_t tx_ring_ref, 261 grant_ref_t tx_ring_ref,
226 grant_ref_t rx_ring_ref); 262 grant_ref_t rx_ring_ref);
227 263
228/* Check for SKBs from frontend and schedule backend processing */ 264/* Check for SKBs from frontend and schedule backend processing */
229void xenvif_napi_schedule_or_enable_events(struct xenvif *vif); 265void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
230 266
231/* Prevent the device from generating any further traffic. */ 267/* Prevent the device from generating any further traffic. */
232void xenvif_carrier_off(struct xenvif *vif); 268void xenvif_carrier_off(struct xenvif *vif);
233 269
234int xenvif_tx_action(struct xenvif *vif, int budget); 270int xenvif_tx_action(struct xenvif_queue *queue, int budget);
235 271
236int xenvif_kthread_guest_rx(void *data); 272int xenvif_kthread_guest_rx(void *data);
237void xenvif_kick_thread(struct xenvif *vif); 273void xenvif_kick_thread(struct xenvif_queue *queue);
238 274
239int xenvif_dealloc_kthread(void *data); 275int xenvif_dealloc_kthread(void *data);
240 276
241/* Determine whether the needed number of slots (req) are available, 277/* Determine whether the needed number of slots (req) are available,
242 * and set req_event if not. 278 * and set req_event if not.
243 */ 279 */
244bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed); 280bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
245 281
246void xenvif_stop_queue(struct xenvif *vif); 282void xenvif_carrier_on(struct xenvif *vif);
247 283
248/* Callback from stack when TX packet can be released */ 284/* Callback from stack when TX packet can be released */
249void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); 285void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
250 286
251/* Unmap a pending page and release it back to the guest */ 287/* Unmap a pending page and release it back to the guest */
252void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx); 288void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
253 289
254static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif) 290static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
255{ 291{
256 return MAX_PENDING_REQS - 292 return MAX_PENDING_REQS -
257 vif->pending_prod + vif->pending_cons; 293 queue->pending_prod + queue->pending_cons;
258} 294}
259 295
260/* Callback from stack when TX packet can be released */ 296/* Callback from stack when TX packet can be released */
@@ -264,5 +300,6 @@ extern bool separate_tx_rx_irq;
264 300
265extern unsigned int rx_drain_timeout_msecs; 301extern unsigned int rx_drain_timeout_msecs;
266extern unsigned int rx_drain_timeout_jiffies; 302extern unsigned int rx_drain_timeout_jiffies;
303extern unsigned int xenvif_max_queues;
267 304
268#endif /* __XEN_NETBACK__COMMON_H__ */ 305#endif /* __XEN_NETBACK__COMMON_H__ */
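The common.h restructuring pulls every per-queue field (rings, NAPI context, IRQs, credit shaping, stats) out of struct xenvif into a new struct xenvif_queue, leaving the VIF to hold only shared state plus a queues array. The enlarged name buffers make room for a queue suffix; composing them typically looks like the sketch below (illustrative only; the real snprintf calls live in interface.c):

	/* QUEUE_NAME_SIZE = IFNAMSIZ + 5 covers "-q" plus the id;
	 * IRQ_NAME_SIZE adds 3 more for "-tx"/"-rx". */
	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
		 queue->vif->dev->name, queue->id);
	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);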
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 20e9defa1060..852da34b8961 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,16 @@
43#define XENVIF_QUEUE_LENGTH 32 43#define XENVIF_QUEUE_LENGTH 32
44#define XENVIF_NAPI_WEIGHT 64 44#define XENVIF_NAPI_WEIGHT 64
45 45
46static inline void xenvif_stop_queue(struct xenvif_queue *queue)
47{
48 struct net_device *dev = queue->vif->dev;
49
50 if (!queue->vif->can_queue)
51 return;
52
53 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
54}
55
46int xenvif_schedulable(struct xenvif *vif) 56int xenvif_schedulable(struct xenvif *vif)
47{ 57{
48 return netif_running(vif->dev) && netif_carrier_ok(vif->dev); 58 return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,33 +60,34 @@ int xenvif_schedulable(struct xenvif *vif)
50 60
51static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) 61static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
52{ 62{
53 struct xenvif *vif = dev_id; 63 struct xenvif_queue *queue = dev_id;
54 64
55 if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) 65 if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
56 napi_schedule(&vif->napi); 66 napi_schedule(&queue->napi);
57 67
58 return IRQ_HANDLED; 68 return IRQ_HANDLED;
59} 69}
60 70
61static int xenvif_poll(struct napi_struct *napi, int budget) 71int xenvif_poll(struct napi_struct *napi, int budget)
62{ 72{
63 struct xenvif *vif = container_of(napi, struct xenvif, napi); 73 struct xenvif_queue *queue =
74 container_of(napi, struct xenvif_queue, napi);
64 int work_done; 75 int work_done;
65 76
66 /* This vif is rogue, we pretend there is nothing to do 77 /* This vif is rogue, we pretend there is nothing to do
67 * for this vif to deschedule it from NAPI. But this interface 78 * for this vif to deschedule it from NAPI. But this interface
68 * will be turned off in thread context later. 79 * will be turned off in thread context later.
69 */ 80 */
70 if (unlikely(vif->disabled)) { 81 if (unlikely(queue->vif->disabled)) {
71 napi_complete(napi); 82 napi_complete(napi);
72 return 0; 83 return 0;
73 } 84 }
74 85
75 work_done = xenvif_tx_action(vif, budget); 86 work_done = xenvif_tx_action(queue, budget);
76 87
77 if (work_done < budget) { 88 if (work_done < budget) {
78 napi_complete(napi); 89 napi_complete(napi);
79 xenvif_napi_schedule_or_enable_events(vif); 90 xenvif_napi_schedule_or_enable_events(queue);
80 } 91 }
81 92
82 return work_done; 93 return work_done;
@@ -84,9 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
84 95
85static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) 96static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
86{ 97{
87 struct xenvif *vif = dev_id; 98 struct xenvif_queue *queue = dev_id;
88 99
89 xenvif_kick_thread(vif); 100 xenvif_kick_thread(queue);
90 101
91 return IRQ_HANDLED; 102 return IRQ_HANDLED;
92} 103}
@@ -99,28 +110,80 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
99 return IRQ_HANDLED; 110 return IRQ_HANDLED;
100} 111}
101 112
102static void xenvif_wake_queue(unsigned long data) 113int xenvif_queue_stopped(struct xenvif_queue *queue)
103{ 114{
104 struct xenvif *vif = (struct xenvif *)data; 115 struct net_device *dev = queue->vif->dev;
116 unsigned int id = queue->id;
117 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
118}
105 119
106 if (netif_queue_stopped(vif->dev)) { 120void xenvif_wake_queue(struct xenvif_queue *queue)
107 netdev_err(vif->dev, "draining TX queue\n"); 121{
108 vif->rx_queue_purge = true; 122 struct net_device *dev = queue->vif->dev;
109 xenvif_kick_thread(vif); 123 unsigned int id = queue->id;
110 netif_wake_queue(vif->dev); 124 netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
125}
126
127/* Callback to wake the queue and drain it on timeout */
128static void xenvif_wake_queue_callback(unsigned long data)
129{
130 struct xenvif_queue *queue = (struct xenvif_queue *)data;
131
132 if (xenvif_queue_stopped(queue)) {
133 netdev_err(queue->vif->dev, "draining TX queue\n");
134 queue->rx_queue_purge = true;
135 xenvif_kick_thread(queue);
136 xenvif_wake_queue(queue);
111 } 137 }
112} 138}
113 139
140static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
141 void *accel_priv, select_queue_fallback_t fallback)
142{
143 unsigned int num_queues = dev->real_num_tx_queues;
144 u32 hash;
145 u16 queue_index;
146
147 /* First, check if there is only one queue to optimise the
148 * single-queue or old frontend scenario.
149 */
150 if (num_queues == 1) {
151 queue_index = 0;
152 } else {
153 /* Use skb_get_hash to obtain an L4 hash if available */
154 hash = skb_get_hash(skb);
155 queue_index = hash % num_queues;
156 }
157
158 return queue_index;
159}
160
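xenvif_select_queue() keeps every flow on one queue: skb_get_hash() returns a stable per-flow hash (L4-tuple based when available), and the modulo spreads flows evenly across the real TX queues. A standalone model of the selection; the FNV-1a mix here is only a stand-in for the kernel's flow hash, not the actual skb_get_hash() implementation:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for skb_get_hash(): any stable 32-bit mix of the flow tuple
 * works for the demo; the kernel uses a different flow hash. */
static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
                          uint16_t sport, uint16_t dport)
{
        uint32_t h = 2166136261u;
        uint32_t words[3] = { saddr, daddr, (uint32_t)sport << 16 | dport };

        for (int w = 0; w < 3; w++)
                for (int b = 0; b < 4; b++) {
                        h ^= (words[w] >> (8 * b)) & 0xff;
                        h *= 16777619u;
                }
        return h;
}

/* Mirrors xenvif_select_queue(): one queue short-circuits to index 0
 * (old single-queue frontends), otherwise hash modulo queue count. */
static uint16_t select_queue(uint32_t hash, unsigned int num_queues)
{
        return num_queues == 1 ? 0 : hash % num_queues;
}

int main(void)
{
        unsigned int num_queues = 4;
        uint32_t h = flow_hash(0x0a000001, 0x0a000002, 49152, 80);

        /* same tuple -> same hash -> same queue, so no intra-flow reorder */
        printf("flow -> queue %u of %u\n",
               (unsigned)select_queue(h, num_queues), num_queues);
        return 0;
}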
114static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 161static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
115{ 162{
116 struct xenvif *vif = netdev_priv(dev); 163 struct xenvif *vif = netdev_priv(dev);
164 struct xenvif_queue *queue = NULL;
165 unsigned int num_queues = dev->real_num_tx_queues;
166 u16 index;
117 int min_slots_needed; 167 int min_slots_needed;
118 168
119 BUG_ON(skb->dev != dev); 169 BUG_ON(skb->dev != dev);
120 170
121 /* Drop the packet if vif is not ready */ 171 /* Drop the packet if queues are not set up */
122 if (vif->task == NULL || 172 if (num_queues < 1)
123 vif->dealloc_task == NULL || 173 goto drop;
174
175 /* Obtain the queue to be used to transmit this packet */
176 index = skb_get_queue_mapping(skb);
177 if (index >= num_queues) {
178 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
179 index, vif->dev->name);
180 index %= num_queues;
181 }
182 queue = &vif->queues[index];
183
184 /* Drop the packet if queue is not ready */
185 if (queue->task == NULL ||
186 queue->dealloc_task == NULL ||
124 !xenvif_schedulable(vif)) 187 !xenvif_schedulable(vif))
125 goto drop; 188 goto drop;
126 189
@@ -139,16 +202,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
139 * then turn off the queue to give the ring a chance to 202 * then turn off the queue to give the ring a chance to
140 * drain. 203 * drain.
141 */ 204 */
142 if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) { 205 if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
143 vif->wake_queue.function = xenvif_wake_queue; 206 queue->wake_queue.function = xenvif_wake_queue_callback;
144 vif->wake_queue.data = (unsigned long)vif; 207 queue->wake_queue.data = (unsigned long)queue;
145 xenvif_stop_queue(vif); 208 xenvif_stop_queue(queue);
146 mod_timer(&vif->wake_queue, 209 mod_timer(&queue->wake_queue,
147 jiffies + rx_drain_timeout_jiffies); 210 jiffies + rx_drain_timeout_jiffies);
148 } 211 }
149 212
150 skb_queue_tail(&vif->rx_queue, skb); 213 skb_queue_tail(&queue->rx_queue, skb);
151 xenvif_kick_thread(vif); 214 xenvif_kick_thread(queue);
152 215
153 return NETDEV_TX_OK; 216 return NETDEV_TX_OK;
154 217
@@ -161,25 +224,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
161static struct net_device_stats *xenvif_get_stats(struct net_device *dev) 224static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
162{ 225{
163 struct xenvif *vif = netdev_priv(dev); 226 struct xenvif *vif = netdev_priv(dev);
227 struct xenvif_queue *queue = NULL;
228 unsigned int num_queues = dev->real_num_tx_queues;
229 unsigned long rx_bytes = 0;
230 unsigned long rx_packets = 0;
231 unsigned long tx_bytes = 0;
232 unsigned long tx_packets = 0;
233 unsigned int index;
234
235 if (vif->queues == NULL)
236 goto out;
237
238 /* Aggregate tx and rx stats from each queue */
239 for (index = 0; index < num_queues; ++index) {
240 queue = &vif->queues[index];
241 rx_bytes += queue->stats.rx_bytes;
242 rx_packets += queue->stats.rx_packets;
243 tx_bytes += queue->stats.tx_bytes;
244 tx_packets += queue->stats.tx_packets;
245 }
246
247out:
248 vif->dev->stats.rx_bytes = rx_bytes;
249 vif->dev->stats.rx_packets = rx_packets;
250 vif->dev->stats.tx_bytes = tx_bytes;
251 vif->dev->stats.tx_packets = tx_packets;
252
164 return &vif->dev->stats; 253 return &vif->dev->stats;
165} 254}
166 255
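Since counters now live per queue, xenvif_get_stats() rebuilds the aggregate device counters on every call by summing over vif->queues. The same walk in miniature, with made-up counter values:

#include <stdio.h>

struct queue_stats {
        unsigned long rx_bytes, rx_packets, tx_bytes, tx_packets;
};

int main(void)
{
        /* per-queue counters, each updated only by its own queue's path */
        struct queue_stats q[3] = {
                { 1000, 10, 2000, 20 },
                { 3000, 30, 4000, 40 },
                {  500,  5,  600,  6 },
        };
        struct queue_stats total = { 0 };

        /* the same walk xenvif_get_stats() does over vif->queues */
        for (int i = 0; i < 3; i++) {
                total.rx_bytes   += q[i].rx_bytes;
                total.rx_packets += q[i].rx_packets;
                total.tx_bytes   += q[i].tx_bytes;
                total.tx_packets += q[i].tx_packets;
        }
        printf("rx %lu pkts / %lu bytes, tx %lu pkts / %lu bytes\n",
               total.rx_packets, total.rx_bytes,
               total.tx_packets, total.tx_bytes);
        return 0;
}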
167static void xenvif_up(struct xenvif *vif) 256static void xenvif_up(struct xenvif *vif)
168{ 257{
169 napi_enable(&vif->napi); 258 struct xenvif_queue *queue = NULL;
170 enable_irq(vif->tx_irq); 259 unsigned int num_queues = vif->dev->real_num_tx_queues;
171 if (vif->tx_irq != vif->rx_irq) 260 unsigned int queue_index;
172 enable_irq(vif->rx_irq); 261
173 xenvif_napi_schedule_or_enable_events(vif); 262 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
263 queue = &vif->queues[queue_index];
264 napi_enable(&queue->napi);
265 enable_irq(queue->tx_irq);
266 if (queue->tx_irq != queue->rx_irq)
267 enable_irq(queue->rx_irq);
268 xenvif_napi_schedule_or_enable_events(queue);
269 }
174} 270}
175 271
176static void xenvif_down(struct xenvif *vif) 272static void xenvif_down(struct xenvif *vif)
177{ 273{
178 napi_disable(&vif->napi); 274 struct xenvif_queue *queue = NULL;
179 disable_irq(vif->tx_irq); 275 unsigned int num_queues = vif->dev->real_num_tx_queues;
180 if (vif->tx_irq != vif->rx_irq) 276 unsigned int queue_index;
181 disable_irq(vif->rx_irq); 277
182 del_timer_sync(&vif->credit_timeout); 278 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
279 queue = &vif->queues[queue_index];
280 napi_disable(&queue->napi);
281 disable_irq(queue->tx_irq);
282 if (queue->tx_irq != queue->rx_irq)
283 disable_irq(queue->rx_irq);
284 del_timer_sync(&queue->credit_timeout);
285 }
183} 286}
184 287
185static int xenvif_open(struct net_device *dev) 288static int xenvif_open(struct net_device *dev)
@@ -187,7 +290,7 @@ static int xenvif_open(struct net_device *dev)
187 struct xenvif *vif = netdev_priv(dev); 290 struct xenvif *vif = netdev_priv(dev);
188 if (netif_carrier_ok(dev)) 291 if (netif_carrier_ok(dev))
189 xenvif_up(vif); 292 xenvif_up(vif);
190 netif_start_queue(dev); 293 netif_tx_start_all_queues(dev);
191 return 0; 294 return 0;
192} 295}
193 296
@@ -196,7 +299,7 @@ static int xenvif_close(struct net_device *dev)
196 struct xenvif *vif = netdev_priv(dev); 299 struct xenvif *vif = netdev_priv(dev);
197 if (netif_carrier_ok(dev)) 300 if (netif_carrier_ok(dev))
198 xenvif_down(vif); 301 xenvif_down(vif);
199 netif_stop_queue(dev); 302 netif_tx_stop_all_queues(dev);
200 return 0; 303 return 0;
201} 304}
202 305
@@ -236,29 +339,29 @@ static const struct xenvif_stat {
236} xenvif_stats[] = { 339} xenvif_stats[] = {
237 { 340 {
238 "rx_gso_checksum_fixup", 341 "rx_gso_checksum_fixup",
239 offsetof(struct xenvif, rx_gso_checksum_fixup) 342 offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
240 }, 343 },
241 /* If (sent != success + fail), there are probably packets never 344 /* If (sent != success + fail), there are probably packets never
242 * freed up properly! 345 * freed up properly!
243 */ 346 */
244 { 347 {
245 "tx_zerocopy_sent", 348 "tx_zerocopy_sent",
246 offsetof(struct xenvif, tx_zerocopy_sent), 349 offsetof(struct xenvif_stats, tx_zerocopy_sent),
247 }, 350 },
248 { 351 {
249 "tx_zerocopy_success", 352 "tx_zerocopy_success",
250 offsetof(struct xenvif, tx_zerocopy_success), 353 offsetof(struct xenvif_stats, tx_zerocopy_success),
251 }, 354 },
252 { 355 {
253 "tx_zerocopy_fail", 356 "tx_zerocopy_fail",
254 offsetof(struct xenvif, tx_zerocopy_fail) 357 offsetof(struct xenvif_stats, tx_zerocopy_fail)
255 }, 358 },
256 /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use 359 /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
257 * a guest with the same MAX_SKB_FRAGS 360 * a guest with the same MAX_SKB_FRAGS
258 */ 361 */
259 { 362 {
260 "tx_frag_overflow", 363 "tx_frag_overflow",
261 offsetof(struct xenvif, tx_frag_overflow) 364 offsetof(struct xenvif_stats, tx_frag_overflow)
262 }, 365 },
263}; 366};
264 367
@@ -275,11 +378,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
275static void xenvif_get_ethtool_stats(struct net_device *dev, 378static void xenvif_get_ethtool_stats(struct net_device *dev,
276 struct ethtool_stats *stats, u64 * data) 379 struct ethtool_stats *stats, u64 * data)
277{ 380{
278 void *vif = netdev_priv(dev); 381 struct xenvif *vif = netdev_priv(dev);
382 unsigned int num_queues = dev->real_num_tx_queues;
279 int i; 383 int i;
280 384 unsigned int queue_index;
281 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) 385 struct xenvif_stats *vif_stats;
282 data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset); 386
387 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
388 unsigned long accum = 0;
389 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
390 vif_stats = &vif->queues[queue_index].stats;
391 accum += *(unsigned long *)((void *)vif_stats + xenvif_stats[i].offset);
392 }
393 data[i] = accum;
394 }
283} 395}
284 396
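The ethtool stats are likewise summed across queues, indexed by byte offsets taken with offsetof(struct xenvif_stats, ...). One subtlety in the loop above: a byte offset must be added to a byte-granular pointer (hence the void * cast); adding it to the typed vif_stats pointer would be scaled by sizeof(struct xenvif_stats). The standalone sketch below (demo struct mirroring the stat names in the table) shows the same pattern with an explicit char * cast:

#include <stdio.h>
#include <stddef.h>

struct xenvif_stats_demo {
        unsigned long rx_gso_checksum_fixup;
        unsigned long tx_zerocopy_sent;
        unsigned long tx_zerocopy_success;
        unsigned long tx_zerocopy_fail;
        unsigned long tx_frag_overflow;
};

static const struct { const char *name; size_t offset; } stat_keys[] = {
        { "tx_zerocopy_sent", offsetof(struct xenvif_stats_demo, tx_zerocopy_sent) },
        { "tx_frag_overflow", offsetof(struct xenvif_stats_demo, tx_frag_overflow) },
};

int main(void)
{
        struct xenvif_stats_demo per_queue[2] = {
                { .tx_zerocopy_sent = 7, .tx_frag_overflow = 1 },
                { .tx_zerocopy_sent = 3, .tx_frag_overflow = 0 },
        };

        for (size_t i = 0; i < sizeof(stat_keys) / sizeof(stat_keys[0]); i++) {
                unsigned long accum = 0;
                for (int q = 0; q < 2; q++) {
                        /* offsetof() yields a byte offset, so step through
                         * the struct via char * before dereferencing */
                        accum += *(unsigned long *)
                                ((char *)&per_queue[q] + stat_keys[i].offset);
                }
                printf("%s = %lu\n", stat_keys[i].name, accum);
        }
        return 0;
}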
285static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) 397static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -312,6 +424,7 @@ static const struct net_device_ops xenvif_netdev_ops = {
312 .ndo_fix_features = xenvif_fix_features, 424 .ndo_fix_features = xenvif_fix_features,
313 .ndo_set_mac_address = eth_mac_addr, 425 .ndo_set_mac_address = eth_mac_addr,
314 .ndo_validate_addr = eth_validate_addr, 426 .ndo_validate_addr = eth_validate_addr,
427 .ndo_select_queue = xenvif_select_queue,
315}; 428};
316 429
317struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, 430struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -321,10 +434,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
321 struct net_device *dev; 434 struct net_device *dev;
322 struct xenvif *vif; 435 struct xenvif *vif;
323 char name[IFNAMSIZ] = {}; 436 char name[IFNAMSIZ] = {};
324 int i;
325 437
326 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); 438 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
327 dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup); 439 /* Allocate a netdev with the max. supported number of queues.
440 * When the guest selects the desired number, it will be updated
441 * via netif_set_real_num_tx_queues().
442 */
443 dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
444 xenvif_max_queues);
328 if (dev == NULL) { 445 if (dev == NULL) {
329 pr_warn("Could not allocate netdev for %s\n", name); 446 pr_warn("Could not allocate netdev for %s\n", name);
330 return ERR_PTR(-ENOMEM); 447 return ERR_PTR(-ENOMEM);
@@ -334,66 +451,28 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
334 451
335 vif = netdev_priv(dev); 452 vif = netdev_priv(dev);
336 453
337 vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
338 MAX_GRANT_COPY_OPS);
339 if (vif->grant_copy_op == NULL) {
340 pr_warn("Could not allocate grant copy space for %s\n", name);
341 free_netdev(dev);
342 return ERR_PTR(-ENOMEM);
343 }
344
345 vif->domid = domid; 454 vif->domid = domid;
346 vif->handle = handle; 455 vif->handle = handle;
347 vif->can_sg = 1; 456 vif->can_sg = 1;
348 vif->ip_csum = 1; 457 vif->ip_csum = 1;
349 vif->dev = dev; 458 vif->dev = dev;
350
351 vif->disabled = false; 459 vif->disabled = false;
352 460
353 vif->credit_bytes = vif->remaining_credit = ~0UL; 461 /* Start out with no queues. The call below does not require
354 vif->credit_usec = 0UL; 462 * rtnl_lock() as it happens before register_netdev().
355 init_timer(&vif->credit_timeout); 463 */
356 vif->credit_window_start = get_jiffies_64(); 464 vif->queues = NULL;
357 465 netif_set_real_num_tx_queues(dev, 0);
358 init_timer(&vif->wake_queue);
359 466
360 dev->netdev_ops = &xenvif_netdev_ops; 467 dev->netdev_ops = &xenvif_netdev_ops;
361 dev->hw_features = NETIF_F_SG | 468 dev->hw_features = NETIF_F_SG |
362 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 469 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
363 NETIF_F_TSO | NETIF_F_TSO6; 470 NETIF_F_TSO | NETIF_F_TSO6;
364 dev->features = dev->hw_features | NETIF_F_RXCSUM; 471 dev->features = dev->hw_features | NETIF_F_RXCSUM;
365 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); 472 dev->ethtool_ops = &xenvif_ethtool_ops;
366 473
367 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 474 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
368 475
369 skb_queue_head_init(&vif->rx_queue);
370 skb_queue_head_init(&vif->tx_queue);
371
372 vif->pending_cons = 0;
373 vif->pending_prod = MAX_PENDING_REQS;
374 for (i = 0; i < MAX_PENDING_REQS; i++)
375 vif->pending_ring[i] = i;
376 spin_lock_init(&vif->callback_lock);
377 spin_lock_init(&vif->response_lock);
378 /* If ballooning is disabled, this will consume real memory, so you
379 * better enable it. The long term solution would be to use just a
380 * bunch of valid page descriptors, without dependency on ballooning
381 */
382 err = alloc_xenballooned_pages(MAX_PENDING_REQS,
383 vif->mmap_pages,
384 false);
385 if (err) {
386 netdev_err(dev, "Could not reserve mmap_pages\n");
387 return ERR_PTR(-ENOMEM);
388 }
389 for (i = 0; i < MAX_PENDING_REQS; i++) {
390 vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
391 { .callback = xenvif_zerocopy_callback,
392 .ctx = NULL,
393 .desc = i };
394 vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
395 }
396
397 /* 476 /*
398 * Initialise a dummy MAC address. We choose the numerically 477 * Initialise a dummy MAC address. We choose the numerically
399 * largest non-broadcast address to prevent the address getting 478 * largest non-broadcast address to prevent the address getting
@@ -403,8 +482,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
403 memset(dev->dev_addr, 0xFF, ETH_ALEN); 482 memset(dev->dev_addr, 0xFF, ETH_ALEN);
404 dev->dev_addr[0] &= ~0x01; 483 dev->dev_addr[0] &= ~0x01;
405 484
406 netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
407
408 netif_carrier_off(dev); 485 netif_carrier_off(dev);
409 486
410 err = register_netdev(dev); 487 err = register_netdev(dev);
@@ -421,98 +498,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
421 return vif; 498 return vif;
422} 499}
423 500
424int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 501int xenvif_init_queue(struct xenvif_queue *queue)
502{
503 int err, i;
504
505 queue->credit_bytes = queue->remaining_credit = ~0UL;
506 queue->credit_usec = 0UL;
507 init_timer(&queue->credit_timeout);
508 queue->credit_window_start = get_jiffies_64();
509
510 skb_queue_head_init(&queue->rx_queue);
511 skb_queue_head_init(&queue->tx_queue);
512
513 queue->pending_cons = 0;
514 queue->pending_prod = MAX_PENDING_REQS;
515 for (i = 0; i < MAX_PENDING_REQS; ++i)
516 queue->pending_ring[i] = i;
517
518 spin_lock_init(&queue->callback_lock);
519 spin_lock_init(&queue->response_lock);
520
521 /* If ballooning is disabled, this will consume real memory, so you
522 * better enable it. The long term solution would be to use just a
523 * bunch of valid page descriptors, without dependency on ballooning
524 */
525 err = alloc_xenballooned_pages(MAX_PENDING_REQS,
526 queue->mmap_pages,
527 false);
528 if (err) {
529 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
530 return -ENOMEM;
531 }
532
533 for (i = 0; i < MAX_PENDING_REQS; i++) {
534 queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
535 { .callback = xenvif_zerocopy_callback,
536 .ctx = NULL,
537 .desc = i };
538 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
539 }
540
541 init_timer(&queue->wake_queue);
542
543 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
544 XENVIF_NAPI_WEIGHT);
545
546 return 0;
547}
548
549void xenvif_carrier_on(struct xenvif *vif)
550{
551 rtnl_lock();
552 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
553 dev_set_mtu(vif->dev, ETH_DATA_LEN);
554 netdev_update_features(vif->dev);
555 netif_carrier_on(vif->dev);
556 if (netif_running(vif->dev))
557 xenvif_up(vif);
558 rtnl_unlock();
559}
560
561int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
425 unsigned long rx_ring_ref, unsigned int tx_evtchn, 562 unsigned long rx_ring_ref, unsigned int tx_evtchn,
426 unsigned int rx_evtchn) 563 unsigned int rx_evtchn)
427{ 564{
428 struct task_struct *task; 565 struct task_struct *task;
429 int err = -ENOMEM; 566 int err = -ENOMEM;
430 567
431 BUG_ON(vif->tx_irq); 568 BUG_ON(queue->tx_irq);
432 BUG_ON(vif->task); 569 BUG_ON(queue->task);
433 BUG_ON(vif->dealloc_task); 570 BUG_ON(queue->dealloc_task);
434 571
435 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 572 err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
436 if (err < 0) 573 if (err < 0)
437 goto err; 574 goto err;
438 575
439 init_waitqueue_head(&vif->wq); 576 init_waitqueue_head(&queue->wq);
440 init_waitqueue_head(&vif->dealloc_wq); 577 init_waitqueue_head(&queue->dealloc_wq);
441 578
442 if (tx_evtchn == rx_evtchn) { 579 if (tx_evtchn == rx_evtchn) {
443 /* feature-split-event-channels == 0 */ 580 /* feature-split-event-channels == 0 */
444 err = bind_interdomain_evtchn_to_irqhandler( 581 err = bind_interdomain_evtchn_to_irqhandler(
445 vif->domid, tx_evtchn, xenvif_interrupt, 0, 582 queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
446 vif->dev->name, vif); 583 queue->name, queue);
447 if (err < 0) 584 if (err < 0)
448 goto err_unmap; 585 goto err_unmap;
449 vif->tx_irq = vif->rx_irq = err; 586 queue->tx_irq = queue->rx_irq = err;
450 disable_irq(vif->tx_irq); 587 disable_irq(queue->tx_irq);
451 } else { 588 } else {
452 /* feature-split-event-channels == 1 */ 589 /* feature-split-event-channels == 1 */
453 snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name), 590 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
454 "%s-tx", vif->dev->name); 591 "%s-tx", queue->name);
455 err = bind_interdomain_evtchn_to_irqhandler( 592 err = bind_interdomain_evtchn_to_irqhandler(
456 vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, 593 queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
457 vif->tx_irq_name, vif); 594 queue->tx_irq_name, queue);
458 if (err < 0) 595 if (err < 0)
459 goto err_unmap; 596 goto err_unmap;
460 vif->tx_irq = err; 597 queue->tx_irq = err;
461 disable_irq(vif->tx_irq); 598 disable_irq(queue->tx_irq);
462 599
463 snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name), 600 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
464 "%s-rx", vif->dev->name); 601 "%s-rx", queue->name);
465 err = bind_interdomain_evtchn_to_irqhandler( 602 err = bind_interdomain_evtchn_to_irqhandler(
466 vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, 603 queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
467 vif->rx_irq_name, vif); 604 queue->rx_irq_name, queue);
468 if (err < 0) 605 if (err < 0)
469 goto err_tx_unbind; 606 goto err_tx_unbind;
470 vif->rx_irq = err; 607 queue->rx_irq = err;
471 disable_irq(vif->rx_irq); 608 disable_irq(queue->rx_irq);
472 } 609 }
473 610
474 task = kthread_create(xenvif_kthread_guest_rx, 611 task = kthread_create(xenvif_kthread_guest_rx,
475 (void *)vif, "%s-guest-rx", vif->dev->name); 612 (void *)queue, "%s-guest-rx", queue->name);
476 if (IS_ERR(task)) { 613 if (IS_ERR(task)) {
477 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 614 pr_warn("Could not allocate kthread for %s\n", queue->name);
478 err = PTR_ERR(task); 615 err = PTR_ERR(task);
479 goto err_rx_unbind; 616 goto err_rx_unbind;
480 } 617 }
481 618 queue->task = task;
482 vif->task = task;
483 619
484 task = kthread_create(xenvif_dealloc_kthread, 620 task = kthread_create(xenvif_dealloc_kthread,
485 (void *)vif, "%s-dealloc", vif->dev->name); 621 (void *)queue, "%s-dealloc", queue->name);
486 if (IS_ERR(task)) { 622 if (IS_ERR(task)) {
487 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 623 pr_warn("Could not allocate kthread for %s\n", queue->name);
488 err = PTR_ERR(task); 624 err = PTR_ERR(task);
489 goto err_rx_unbind; 625 goto err_rx_unbind;
490 } 626 }
627 queue->dealloc_task = task;
491 628
492 vif->dealloc_task = task; 629 wake_up_process(queue->task);
493 630 wake_up_process(queue->dealloc_task);
494 rtnl_lock();
495 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
496 dev_set_mtu(vif->dev, ETH_DATA_LEN);
497 netdev_update_features(vif->dev);
498 netif_carrier_on(vif->dev);
499 if (netif_running(vif->dev))
500 xenvif_up(vif);
501 rtnl_unlock();
502
503 wake_up_process(vif->task);
504 wake_up_process(vif->dealloc_task);
505 631
506 return 0; 632 return 0;
507 633
508err_rx_unbind: 634err_rx_unbind:
509 unbind_from_irqhandler(vif->rx_irq, vif); 635 unbind_from_irqhandler(queue->rx_irq, queue);
510 vif->rx_irq = 0; 636 queue->rx_irq = 0;
511err_tx_unbind: 637err_tx_unbind:
512 unbind_from_irqhandler(vif->tx_irq, vif); 638 unbind_from_irqhandler(queue->tx_irq, queue);
513 vif->tx_irq = 0; 639 queue->tx_irq = 0;
514err_unmap: 640err_unmap:
515 xenvif_unmap_frontend_rings(vif); 641 xenvif_unmap_frontend_rings(queue);
516err: 642err:
517 module_put(THIS_MODULE); 643 module_put(THIS_MODULE);
518 return err; 644 return err;
@@ -529,38 +655,77 @@ void xenvif_carrier_off(struct xenvif *vif)
529 rtnl_unlock(); 655 rtnl_unlock();
530} 656}
531 657
658static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
659 unsigned int worst_case_skb_lifetime)
660{
661 int i, unmap_timeout = 0;
662
663 for (i = 0; i < MAX_PENDING_REQS; ++i) {
664 if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
665 unmap_timeout++;
666 schedule_timeout(msecs_to_jiffies(1000));
667 if (unmap_timeout > worst_case_skb_lifetime &&
668 net_ratelimit())
669 netdev_err(queue->vif->dev,
670 "Page still granted! Index: %x\n",
671 i);
672 i = -1;
673 }
674 }
675}
676
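xenvif_wait_unmap_timeout() polls the grant-handle array and, on finding any entry still granted, sleeps a second and restarts the scan from index 0 (the i = -1 trick); it returns only after one clean pass, because exiting earlier would tear the queue down while guest pages are still mapped. The restart pattern in isolation (the array stands in for grant_tx_handle, and clearing an entry stands in for the dealloc thread making progress):

#include <stdio.h>

#define NENTRIES 8
#define INVALID  0

int main(void)
{
        /* two entries still "granted" at first; they clear as we wait */
        int handle[NENTRIES] = { 0, 0, 1, 0, 0, 1, 0, 0 };
        int waits = 0;

        for (int i = 0; i < NENTRIES; i++) {
                if (handle[i] != INVALID) {
                        waits++;             /* kernel: schedule_timeout(1s) */
                        handle[i] = INVALID; /* pretend the dealloc thread ran */
                        i = -1;              /* restart: only a clean pass exits */
                }
        }
        printf("slept %d times before all grants were released\n", waits);
        return 0;
}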
532void xenvif_disconnect(struct xenvif *vif) 677void xenvif_disconnect(struct xenvif *vif)
533{ 678{
679 struct xenvif_queue *queue = NULL;
680 unsigned int num_queues = vif->dev->real_num_tx_queues;
681 unsigned int queue_index;
682
534 if (netif_carrier_ok(vif->dev)) 683 if (netif_carrier_ok(vif->dev))
535 xenvif_carrier_off(vif); 684 xenvif_carrier_off(vif);
536 685
537 if (vif->task) { 686 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
538 del_timer_sync(&vif->wake_queue); 687 queue = &vif->queues[queue_index];
539 kthread_stop(vif->task);
540 vif->task = NULL;
541 }
542 688
543 if (vif->dealloc_task) { 689 if (queue->task) {
544 kthread_stop(vif->dealloc_task); 690 del_timer_sync(&queue->wake_queue);
545 vif->dealloc_task = NULL; 691 kthread_stop(queue->task);
546 } 692 queue->task = NULL;
693 }
547 694
548 if (vif->tx_irq) { 695 if (queue->dealloc_task) {
549 if (vif->tx_irq == vif->rx_irq) 696 kthread_stop(queue->dealloc_task);
550 unbind_from_irqhandler(vif->tx_irq, vif); 697 queue->dealloc_task = NULL;
551 else {
552 unbind_from_irqhandler(vif->tx_irq, vif);
553 unbind_from_irqhandler(vif->rx_irq, vif);
554 } 698 }
555 vif->tx_irq = 0; 699
700 if (queue->tx_irq) {
701 if (queue->tx_irq == queue->rx_irq)
702 unbind_from_irqhandler(queue->tx_irq, queue);
703 else {
704 unbind_from_irqhandler(queue->tx_irq, queue);
705 unbind_from_irqhandler(queue->rx_irq, queue);
706 }
707 queue->tx_irq = 0;
708 }
709
710 xenvif_unmap_frontend_rings(queue);
556 } 711 }
712}
557 713
558 xenvif_unmap_frontend_rings(vif); 714/* Reverse the relevant parts of xenvif_init_queue().
715 * Used for queue teardown from xenvif_free(), and on the
716 * error handling paths in xenbus.c:connect().
717 */
718void xenvif_deinit_queue(struct xenvif_queue *queue)
719{
720 free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
721 netif_napi_del(&queue->napi);
559} 722}
560 723
561void xenvif_free(struct xenvif *vif) 724void xenvif_free(struct xenvif *vif)
562{ 725{
563 int i, unmap_timeout = 0; 726 struct xenvif_queue *queue = NULL;
727 unsigned int num_queues = vif->dev->real_num_tx_queues;
728 unsigned int queue_index;
564 /* Here we want to avoid timeout messages if an skb can be legitimately 729 /* Here we want to avoid timeout messages if an skb can be legitimately
565 * stuck somewhere else. Realistically this could be another vif's 730 * stuck somewhere else. Realistically this could be another vif's
566 * internal or QDisc queue. That other vif also has this 731 * internal or QDisc queue. That other vif also has this
@@ -575,33 +740,21 @@ void xenvif_free(struct xenvif *vif)
575 unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) * 740 unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
576 DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS)); 741 DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
577 742
578 for (i = 0; i < MAX_PENDING_REQS; ++i) { 743 unregister_netdev(vif->dev);
579 if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
580 unmap_timeout++;
581 schedule_timeout(msecs_to_jiffies(1000));
582 if (unmap_timeout > worst_case_skb_lifetime &&
583 net_ratelimit())
584 netdev_err(vif->dev,
585 "Page still granted! Index: %x\n",
586 i);
587 /* If there are still unmapped pages, reset the loop to
588 * start checking again. We shouldn't exit here until
589 * dealloc thread and NAPI instance release all the
590 * pages. If a kernel bug causes the skbs to stall
591 * somewhere, the interface cannot be brought down
592 * properly.
593 */
594 i = -1;
595 }
596 }
597
598 free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
599 744
600 netif_napi_del(&vif->napi); 745 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
746 queue = &vif->queues[queue_index];
747 xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
748 xenvif_deinit_queue(queue);
749 }
601 750
602 unregister_netdev(vif->dev); 751 /* Free the array of queues. The call below does not require
752 * rtnl_lock() because it happens after unregister_netdev().
753 */
754 netif_set_real_num_tx_queues(vif->dev, 0);
755 vfree(vif->queues);
756 vif->queues = NULL;
603 757
604 vfree(vif->grant_copy_op);
605 free_netdev(vif->dev); 758 free_netdev(vif->dev);
606 759
607 module_put(THIS_MODULE); 760 module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7367208ee8cd..1844a47636b6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -62,6 +62,11 @@ unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444); 62module_param(rx_drain_timeout_msecs, uint, 0444);
63unsigned int rx_drain_timeout_jiffies; 63unsigned int rx_drain_timeout_jiffies;
64 64
65unsigned int xenvif_max_queues;
66module_param_named(max_queues, xenvif_max_queues, uint, 0644);
67MODULE_PARM_DESC(max_queues,
68 "Maximum number of queues per virtual interface");
69
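Because max_queues is registered with mode 0644, it can be set at load time (e.g. xen_netback.max_queues=4 on the kernel command line) and read back through sysfs once the module is loaded. A small reader, assuming the conventional /sys/module/<name>/parameters layout (the path only exists with xen-netback present):

#include <stdio.h>

int main(void)
{
        /* standard sysfs location for a module_param_named() with 0644 */
        const char *path =
                "/sys/module/xen_netback/parameters/max_queues";
        FILE *f = fopen(path, "r");
        unsigned int val;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%u", &val) == 1)
                printf("xen-netback max_queues = %u\n", val);
        fclose(f);
        return 0;
}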
65/* 70/*
66 * This is the maximum slots a skb can have. If a guest sends a skb 71 * This is the maximum slots a skb can have. If a guest sends a skb
67 * which exceeds this limit it is considered malicious. 72 * which exceeds this limit it is considered malicious.
@@ -70,33 +75,33 @@ unsigned int rx_drain_timeout_jiffies;
70static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; 75static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
71module_param(fatal_skb_slots, uint, 0444); 76module_param(fatal_skb_slots, uint, 0444);
72 77
73static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, 78static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
74 u8 status); 79 u8 status);
75 80
76static void make_tx_response(struct xenvif *vif, 81static void make_tx_response(struct xenvif_queue *queue,
77 struct xen_netif_tx_request *txp, 82 struct xen_netif_tx_request *txp,
78 s8 st); 83 s8 st);
79 84
80static inline int tx_work_todo(struct xenvif *vif); 85static inline int tx_work_todo(struct xenvif_queue *queue);
81static inline int rx_work_todo(struct xenvif *vif); 86static inline int rx_work_todo(struct xenvif_queue *queue);
82 87
83static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 88static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
84 u16 id, 89 u16 id,
85 s8 st, 90 s8 st,
86 u16 offset, 91 u16 offset,
87 u16 size, 92 u16 size,
88 u16 flags); 93 u16 flags);
89 94
90static inline unsigned long idx_to_pfn(struct xenvif *vif, 95static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
91 u16 idx) 96 u16 idx)
92{ 97{
93 return page_to_pfn(vif->mmap_pages[idx]); 98 return page_to_pfn(queue->mmap_pages[idx]);
94} 99}
95 100
96static inline unsigned long idx_to_kaddr(struct xenvif *vif, 101static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
97 u16 idx) 102 u16 idx)
98{ 103{
99 return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); 104 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
100} 105}
101 106
102#define callback_param(vif, pending_idx) \ 107#define callback_param(vif, pending_idx) \
@@ -104,13 +109,13 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
104 109
105/* Find the containing VIF's structure from a pointer in pending_tx_info array 110/* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */ 111 */
107static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf) 112static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
108{ 113{
109 u16 pending_idx = ubuf->desc; 114 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp = 115 struct pending_tx_info *temp =
111 container_of(ubuf, struct pending_tx_info, callback_struct); 116 container_of(ubuf, struct pending_tx_info, callback_struct);
112 return container_of(temp - pending_idx, 117 return container_of(temp - pending_idx,
113 struct xenvif, 118 struct xenvif_queue,
114 pending_tx_info[0]); 119 pending_tx_info[0]);
115} 120}
116 121
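ubuf_to_queue() recovers the owning queue from nothing but an interior pointer: the first container_of() turns the ubuf_info into its enclosing pending_tx_info, then ubuf->desc (each callback_struct is initialised with its own array index) rewinds to element 0, from which a second container_of() reaches the queue. The same two-step recovery in standalone form, with trimmed-down types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ubuf_info { unsigned short desc; /* index of this element */ };
struct pending_tx_info { int req; struct ubuf_info callback_struct; };

#define MAX_PENDING_REQS 4

struct queue {
        const char *name;
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
};

static struct queue *ubuf_to_queue(struct ubuf_info *ubuf)
{
        unsigned short pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
                container_of(ubuf, struct pending_tx_info, callback_struct);

        /* temp - pending_idx walks back to element 0 of the array, whose
         * offset inside struct queue is known to container_of() */
        return container_of(temp - pending_idx, struct queue,
                            pending_tx_info[0]);
}

int main(void)
{
        struct queue q = { .name = "vif1.0-q0" };

        for (unsigned short i = 0; i < MAX_PENDING_REQS; i++)
                q.pending_tx_info[i].callback_struct.desc = i;

        /* hand out a pointer to an interior ubuf_info, then recover q */
        printf("recovered %s\n",
               ubuf_to_queue(&q.pending_tx_info[2].callback_struct)->name);
        return 0;
}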
@@ -136,24 +141,24 @@ static inline pending_ring_idx_t pending_index(unsigned i)
136 return i & (MAX_PENDING_REQS-1); 141 return i & (MAX_PENDING_REQS-1);
137} 142}
138 143
139bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed) 144bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
140{ 145{
141 RING_IDX prod, cons; 146 RING_IDX prod, cons;
142 147
143 do { 148 do {
144 prod = vif->rx.sring->req_prod; 149 prod = queue->rx.sring->req_prod;
145 cons = vif->rx.req_cons; 150 cons = queue->rx.req_cons;
146 151
147 if (prod - cons >= needed) 152 if (prod - cons >= needed)
148 return true; 153 return true;
149 154
150 vif->rx.sring->req_event = prod + 1; 155 queue->rx.sring->req_event = prod + 1;
151 156
152 /* Make sure event is visible before we check prod 157 /* Make sure event is visible before we check prod
153 * again. 158 * again.
154 */ 159 */
155 mb(); 160 mb();
156 } while (vif->rx.sring->req_prod != prod); 161 } while (queue->rx.sring->req_prod != prod);
157 162
158 return false; 163 return false;
159} 164}
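The loop above closes a wake-up race: when too few slots are free it arms req_event at prod + 1 so the frontend sends an event on its next post, then re-reads req_prod after the barrier; a frontend that raced in between is caught by the re-read instead of the backend sleeping on an event that already fired. A sequential model of the check/arm/re-check shape (no real concurrency or barriers, just the control flow):

#include <stdio.h>
#include <stdbool.h>

/* prod is advanced by the frontend, cons by the backend; req_event tells
 * the frontend when to send the next event. Sequential model only. */
static unsigned int req_prod, req_cons, req_event;

static bool rx_ring_slots_available(unsigned int needed)
{
        unsigned int prod, cons;

        do {
                prod = req_prod;
                cons = req_cons;

                if (prod - cons >= needed)
                        return true;

                /* arm the event first; the kernel puts mb() here so the
                 * re-read of req_prod below cannot be ordered before it */
                req_event = prod + 1;
        } while (req_prod != prod);  /* catches a frontend that raced in */

        return false;
}

int main(void)
{
        req_prod = 10;
        req_cons = 8;
        printf("need 2 -> %d\n", rx_ring_slots_available(2));
        printf("need 5 -> %d\n", rx_ring_slots_available(5));
        printf("frontend will notify once req_prod reaches %u\n", req_event);
        return 0;
}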
@@ -163,7 +168,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
163 * adding 'size' bytes to a buffer which currently contains 'offset' 168 * adding 'size' bytes to a buffer which currently contains 'offset'
164 * bytes. 169 * bytes.
165 */ 170 */
166static bool start_new_rx_buffer(int offset, unsigned long size, int head) 171static bool start_new_rx_buffer(int offset, unsigned long size, int head,
172 bool full_coalesce)
167{ 173{
168 /* simple case: we have completely filled the current buffer. */ 174 /* simple case: we have completely filled the current buffer. */
169 if (offset == MAX_BUFFER_OFFSET) 175 if (offset == MAX_BUFFER_OFFSET)
@@ -175,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
175 * (i) this frag would fit completely in the next buffer 181 * (i) this frag would fit completely in the next buffer
176 * and (ii) there is already some data in the current buffer 182 * and (ii) there is already some data in the current buffer
177 * and (iii) this is not the head buffer. 183 * and (iii) this is not the head buffer.
184 * and (iv) there is no need to fully utilize the buffers
178 * 185 *
179 * Where: 186 * Where:
180 * - (i) stops us splitting a frag into two copies 187 * - (i) stops us splitting a frag into two copies
@@ -185,6 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
185 * by (ii) but is explicitly checked because 192 * by (ii) but is explicitly checked because
186 * netfront relies on the first buffer being 193 * netfront relies on the first buffer being
187 * non-empty and can crash otherwise. 194 * non-empty and can crash otherwise.
195 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
196 * slots
188 * 197 *
189 * This means we will effectively linearise small 198 * This means we will effectively linearise small
190 * frags but do not needlessly split large buffers 199 * frags but do not needlessly split large buffers
@@ -192,7 +201,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
192 * own buffers as before. 201 * own buffers as before.
193 */ 202 */
194 BUG_ON(size > MAX_BUFFER_OFFSET); 203 BUG_ON(size > MAX_BUFFER_OFFSET);
195 if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head) 204 if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
205 !full_coalesce)
196 return true; 206 return true;
197 207
198 return false; 208 return false;
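start_new_rx_buffer() returns true when the next chunk should go into a fresh guest buffer, and the new full_coalesce input disables the don't-split-a-frag heuristic when the skb has to be squeezed into a bounded number of slots. The predicate extracted on its own, with a 4096-byte buffer standing in for the driver's MAX_BUFFER_OFFSET:

#include <stdio.h>
#include <stdbool.h>

#define MAX_BUFFER_OFFSET 4096 /* stand-in for the driver's page-sized value */

/* same decision as start_new_rx_buffer() above */
static bool start_new_rx_buffer(int offset, unsigned long size, int head,
                                bool full_coalesce)
{
        if (offset == MAX_BUFFER_OFFSET)
                return true; /* current buffer completely full */

        if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
            !full_coalesce)
                return true; /* frag would fit whole in the next buffer */

        return false;
}

int main(void)
{
        /* 3000 bytes arriving at offset 2000: normally start a new buffer
         * so the frag is not split across two copy operations... */
        printf("%d\n", start_new_rx_buffer(2000, 3000, 0, false)); /* 1 */
        /* ...but under full coalescing, pack it into the current one */
        printf("%d\n", start_new_rx_buffer(2000, 3000, 0, true));  /* 0 */
        return 0;
}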
@@ -207,13 +217,13 @@ struct netrx_pending_operations {
207 grant_ref_t copy_gref; 217 grant_ref_t copy_gref;
208}; 218};
209 219
210static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, 220static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
211 struct netrx_pending_operations *npo) 221 struct netrx_pending_operations *npo)
212{ 222{
213 struct xenvif_rx_meta *meta; 223 struct xenvif_rx_meta *meta;
214 struct xen_netif_rx_request *req; 224 struct xen_netif_rx_request *req;
215 225
216 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 226 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
217 227
218 meta = npo->meta + npo->meta_prod++; 228 meta = npo->meta + npo->meta_prod++;
219 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 229 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
@@ -227,15 +237,22 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
227 return meta; 237 return meta;
228} 238}
229 239
240struct xenvif_rx_cb {
241 int meta_slots_used;
242 bool full_coalesce;
243};
244
245#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
246
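xenvif_rx_cb is overlaid on skb->cb, the scratch area an sk_buff carries for whoever currently owns it; moving the struct up here lets the RX path record full_coalesce per skb before the copy loop runs. The overlay pattern as a standalone program (the 48-byte cb size matches contemporary sk_buff but is an assumption here):

#include <stdio.h>
#include <stdbool.h>

/* model of the sk_buff scratch area that owners may overlay */
struct sk_buff_demo {
        char cb[48];
        /* ... real sk_buff fields ... */
};

struct xenvif_rx_cb {
        int meta_slots_used;
        bool full_coalesce;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

/* the overlay must fit in the scratch area */
_Static_assert(sizeof(struct xenvif_rx_cb) <=
               sizeof(((struct sk_buff_demo *)0)->cb),
               "cb overlay too large");

int main(void)
{
        struct sk_buff_demo skb = { { 0 } };

        XENVIF_RX_CB(&skb)->meta_slots_used = 3;
        XENVIF_RX_CB(&skb)->full_coalesce = true;
        printf("slots=%d coalesce=%d\n",
               XENVIF_RX_CB(&skb)->meta_slots_used,
               XENVIF_RX_CB(&skb)->full_coalesce);
        return 0;
}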
230/* 247/*
231 * Set up the grant operations for this fragment. If it's a flipping 248 * Set up the grant operations for this fragment. If it's a flipping
232 * interface, we also set up the unmap request from here. 249 * interface, we also set up the unmap request from here.
233 */ 250 */
234static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, 251static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
235 struct netrx_pending_operations *npo, 252 struct netrx_pending_operations *npo,
236 struct page *page, unsigned long size, 253 struct page *page, unsigned long size,
237 unsigned long offset, int *head, 254 unsigned long offset, int *head,
238 struct xenvif *foreign_vif, 255 struct xenvif_queue *foreign_queue,
239 grant_ref_t foreign_gref) 256 grant_ref_t foreign_gref)
240{ 257{
241 struct gnttab_copy *copy_gop; 258 struct gnttab_copy *copy_gop;
@@ -261,14 +278,17 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
261 if (bytes > size) 278 if (bytes > size)
262 bytes = size; 279 bytes = size;
263 280
264 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { 281 if (start_new_rx_buffer(npo->copy_off,
282 bytes,
283 *head,
284 XENVIF_RX_CB(skb)->full_coalesce)) {
265 /* 285 /*
266 * Netfront requires there to be some data in the head 286 * Netfront requires there to be some data in the head
267 * buffer. 287 * buffer.
268 */ 288 */
269 BUG_ON(*head); 289 BUG_ON(*head);
270 290
271 meta = get_next_rx_buffer(vif, npo); 291 meta = get_next_rx_buffer(queue, npo);
272 } 292 }
273 293
274 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) 294 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
@@ -278,8 +298,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
278 copy_gop->flags = GNTCOPY_dest_gref; 298 copy_gop->flags = GNTCOPY_dest_gref;
279 copy_gop->len = bytes; 299 copy_gop->len = bytes;
280 300
281 if (foreign_vif) { 301 if (foreign_queue) {
282 copy_gop->source.domid = foreign_vif->domid; 302 copy_gop->source.domid = foreign_queue->vif->domid;
283 copy_gop->source.u.ref = foreign_gref; 303 copy_gop->source.u.ref = foreign_gref;
284 copy_gop->flags |= GNTCOPY_source_gref; 304 copy_gop->flags |= GNTCOPY_source_gref;
285 } else { 305 } else {
@@ -289,7 +309,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
289 } 309 }
290 copy_gop->source.offset = offset; 310 copy_gop->source.offset = offset;
291 311
292 copy_gop->dest.domid = vif->domid; 312 copy_gop->dest.domid = queue->vif->domid;
293 copy_gop->dest.offset = npo->copy_off; 313 copy_gop->dest.offset = npo->copy_off;
294 copy_gop->dest.u.ref = npo->copy_gref; 314 copy_gop->dest.u.ref = npo->copy_gref;
295 315
@@ -314,8 +334,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
314 gso_type = XEN_NETIF_GSO_TYPE_TCPV6; 334 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
315 } 335 }
316 336
317 if (*head && ((1 << gso_type) & vif->gso_mask)) 337 if (*head && ((1 << gso_type) & queue->vif->gso_mask))
318 vif->rx.req_cons++; 338 queue->rx.req_cons++;
319 339
320 *head = 0; /* There must be something in this buffer now. */ 340 *head = 0; /* There must be something in this buffer now. */
321 341
@@ -337,13 +357,13 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
337 const int i, 357 const int i,
338 const struct ubuf_info *ubuf) 358 const struct ubuf_info *ubuf)
339{ 359{
340 struct xenvif *foreign_vif = ubuf_to_vif(ubuf); 360 struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
341 361
342 do { 362 do {
343 u16 pending_idx = ubuf->desc; 363 u16 pending_idx = ubuf->desc;
344 364
345 if (skb_shinfo(skb)->frags[i].page.p == 365 if (skb_shinfo(skb)->frags[i].page.p ==
346 foreign_vif->mmap_pages[pending_idx]) 366 foreign_queue->mmap_pages[pending_idx])
347 break; 367 break;
348 ubuf = (struct ubuf_info *) ubuf->ctx; 368 ubuf = (struct ubuf_info *) ubuf->ctx;
349 } while (ubuf); 369 } while (ubuf);
@@ -364,7 +384,8 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
364 * frontend-side LRO). 384 * frontend-side LRO).
365 */ 385 */
366static int xenvif_gop_skb(struct sk_buff *skb, 386static int xenvif_gop_skb(struct sk_buff *skb,
367 struct netrx_pending_operations *npo) 387 struct netrx_pending_operations *npo,
388 struct xenvif_queue *queue)
368{ 389{
369 struct xenvif *vif = netdev_priv(skb->dev); 390 struct xenvif *vif = netdev_priv(skb->dev);
370 int nr_frags = skb_shinfo(skb)->nr_frags; 391 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -390,7 +411,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
390 411
391 /* Set up a GSO prefix descriptor, if necessary */ 412 /* Set up a GSO prefix descriptor, if necessary */
392 if ((1 << gso_type) & vif->gso_prefix_mask) { 413 if ((1 << gso_type) & vif->gso_prefix_mask) {
393 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 414 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
394 meta = npo->meta + npo->meta_prod++; 415 meta = npo->meta + npo->meta_prod++;
395 meta->gso_type = gso_type; 416 meta->gso_type = gso_type;
396 meta->gso_size = skb_shinfo(skb)->gso_size; 417 meta->gso_size = skb_shinfo(skb)->gso_size;
@@ -398,7 +419,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
398 meta->id = req->id; 419 meta->id = req->id;
399 } 420 }
400 421
401 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 422 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
402 meta = npo->meta + npo->meta_prod++; 423 meta = npo->meta + npo->meta_prod++;
403 424
404 if ((1 << gso_type) & vif->gso_mask) { 425 if ((1 << gso_type) & vif->gso_mask) {
@@ -422,7 +443,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
422 if (data + len > skb_tail_pointer(skb)) 443 if (data + len > skb_tail_pointer(skb))
423 len = skb_tail_pointer(skb) - data; 444 len = skb_tail_pointer(skb) - data;
424 445
425 xenvif_gop_frag_copy(vif, skb, npo, 446 xenvif_gop_frag_copy(queue, skb, npo,
426 virt_to_page(data), len, offset, &head, 447 virt_to_page(data), len, offset, &head,
427 NULL, 448 NULL,
428 0); 449 0);
@@ -433,7 +454,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
433 /* This variable also signals whether foreign_gref has a real 454 /* This variable also signals whether foreign_gref has a real
434 * value or not. 455 * value or not.
435 */ 456 */
436 struct xenvif *foreign_vif = NULL; 457 struct xenvif_queue *foreign_queue = NULL;
437 grant_ref_t foreign_gref; 458 grant_ref_t foreign_gref;
438 459
439 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && 460 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
@@ -458,8 +479,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
458 if (likely(ubuf)) { 479 if (likely(ubuf)) {
459 u16 pending_idx = ubuf->desc; 480 u16 pending_idx = ubuf->desc;
460 481
461 foreign_vif = ubuf_to_vif(ubuf); 482 foreign_queue = ubuf_to_queue(ubuf);
462 foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref; 483 foreign_gref =
484 foreign_queue->pending_tx_info[pending_idx].req.gref;
463 /* Just a safety measure. If this was the last 485 /* Just a safety measure. If this was the last
464 * element on the list, the for loop will 486 * element on the list, the for loop will
465 * iterate again if a local page were added to 487 * iterate again if a local page were added to
@@ -477,13 +499,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
477 */ 499 */
478 ubuf = head_ubuf; 500 ubuf = head_ubuf;
479 } 501 }
480 xenvif_gop_frag_copy(vif, skb, npo, 502 xenvif_gop_frag_copy(queue, skb, npo,
481 skb_frag_page(&skb_shinfo(skb)->frags[i]), 503 skb_frag_page(&skb_shinfo(skb)->frags[i]),
482 skb_frag_size(&skb_shinfo(skb)->frags[i]), 504 skb_frag_size(&skb_shinfo(skb)->frags[i]),
483 skb_shinfo(skb)->frags[i].page_offset, 505 skb_shinfo(skb)->frags[i].page_offset,
484 &head, 506 &head,
485 foreign_vif, 507 foreign_queue,
486 foreign_vif ? foreign_gref : UINT_MAX); 508 foreign_queue ? foreign_gref : UINT_MAX);
487 } 509 }
488 510
489 return npo->meta_prod - old_meta_prod; 511 return npo->meta_prod - old_meta_prod;
@@ -515,7 +537,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
515 return status; 537 return status;
516} 538}
517 539
518static void xenvif_add_frag_responses(struct xenvif *vif, int status, 540static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
519 struct xenvif_rx_meta *meta, 541 struct xenvif_rx_meta *meta,
520 int nr_meta_slots) 542 int nr_meta_slots)
521{ 543{
@@ -536,23 +558,17 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
536 flags = XEN_NETRXF_more_data; 558 flags = XEN_NETRXF_more_data;
537 559
538 offset = 0; 560 offset = 0;
539 make_rx_response(vif, meta[i].id, status, offset, 561 make_rx_response(queue, meta[i].id, status, offset,
540 meta[i].size, flags); 562 meta[i].size, flags);
541 } 563 }
542} 564}
543 565
544struct xenvif_rx_cb { 566void xenvif_kick_thread(struct xenvif_queue *queue)
545 int meta_slots_used;
546};
547
548#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
549
550void xenvif_kick_thread(struct xenvif *vif)
551{ 567{
552 wake_up(&vif->wq); 568 wake_up(&queue->wq);
553} 569}
554 570
555static void xenvif_rx_action(struct xenvif *vif) 571static void xenvif_rx_action(struct xenvif_queue *queue)
556{ 572{
557 s8 status; 573 s8 status;
558 u16 flags; 574 u16 flags;
@@ -565,13 +581,13 @@ static void xenvif_rx_action(struct xenvif *vif)
565 bool need_to_notify = false; 581 bool need_to_notify = false;
566 582
567 struct netrx_pending_operations npo = { 583 struct netrx_pending_operations npo = {
568 .copy = vif->grant_copy_op, 584 .copy = queue->grant_copy_op,
569 .meta = vif->meta, 585 .meta = queue->meta,
570 }; 586 };
571 587
572 skb_queue_head_init(&rxq); 588 skb_queue_head_init(&rxq);
573 589
574 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { 590 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
575 RING_IDX max_slots_needed; 591 RING_IDX max_slots_needed;
576 RING_IDX old_req_cons; 592 RING_IDX old_req_cons;
577 RING_IDX ring_slots_used; 593 RING_IDX ring_slots_used;
@@ -602,10 +618,15 @@ static void xenvif_rx_action(struct xenvif *vif)
602 618
603 /* To avoid the estimate becoming too pessimal for some 619 /* To avoid the estimate becoming too pessimal for some
604 * frontends that limit posted rx requests, cap the estimate 620 * frontends that limit posted rx requests, cap the estimate
605 * at MAX_SKB_FRAGS. 621 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
622 * the skb into the provided slots.
606 */ 623 */
607 if (max_slots_needed > MAX_SKB_FRAGS) 624 if (max_slots_needed > MAX_SKB_FRAGS) {
608 max_slots_needed = MAX_SKB_FRAGS; 625 max_slots_needed = MAX_SKB_FRAGS;
626 XENVIF_RX_CB(skb)->full_coalesce = true;
627 } else {
628 XENVIF_RX_CB(skb)->full_coalesce = false;
629 }
609 630
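When the slot estimate exceeds MAX_SKB_FRAGS the driver caps the reservation and flags the skb for full coalescing, which is what start_new_rx_buffer() consults above. The capping decision on its own (the estimate value and a MAX_SKB_FRAGS of 17 are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define MAX_SKB_FRAGS 17 /* typical with 4K pages; varies by config */

int main(void)
{
        /* estimate of ring slots the skb's head and frags will occupy,
         * as computed earlier in xenvif_rx_action() */
        unsigned int max_slots_needed = 23;
        bool full_coalesce;

        /* mirror of the capping logic above: never reserve more than
         * MAX_SKB_FRAGS slots; instead commit to fully packing buffers */
        if (max_slots_needed > MAX_SKB_FRAGS) {
                max_slots_needed = MAX_SKB_FRAGS;
                full_coalesce = true;
        } else {
                full_coalesce = false;
        }

        printf("reserve %u slots, full_coalesce=%d\n",
               max_slots_needed, full_coalesce);
        return 0;
}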
610 /* We may need one more slot for GSO metadata */ 631 /* We may need one more slot for GSO metadata */
611 if (skb_is_gso(skb) && 632 if (skb_is_gso(skb) &&
@@ -614,42 +635,42 @@ static void xenvif_rx_action(struct xenvif *vif)
614 max_slots_needed++; 635 max_slots_needed++;
615 636
616 /* If the skb may not fit then bail out now */ 637 /* If the skb may not fit then bail out now */
617 if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) { 638 if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
618 skb_queue_head(&vif->rx_queue, skb); 639 skb_queue_head(&queue->rx_queue, skb);
619 need_to_notify = true; 640 need_to_notify = true;
620 vif->rx_last_skb_slots = max_slots_needed; 641 queue->rx_last_skb_slots = max_slots_needed;
621 break; 642 break;
622 } else 643 } else
623 vif->rx_last_skb_slots = 0; 644 queue->rx_last_skb_slots = 0;
624 645
625 old_req_cons = vif->rx.req_cons; 646 old_req_cons = queue->rx.req_cons;
626 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo); 647 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
627 ring_slots_used = vif->rx.req_cons - old_req_cons; 648 ring_slots_used = queue->rx.req_cons - old_req_cons;
628 649
629 BUG_ON(ring_slots_used > max_slots_needed); 650 BUG_ON(ring_slots_used > max_slots_needed);
630 651
631 __skb_queue_tail(&rxq, skb); 652 __skb_queue_tail(&rxq, skb);
632 } 653 }
633 654
634 BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); 655 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
635 656
636 if (!npo.copy_prod) 657 if (!npo.copy_prod)
637 goto done; 658 goto done;
638 659
639 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); 660 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
640 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 661 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
641 662
642 while ((skb = __skb_dequeue(&rxq)) != NULL) { 663 while ((skb = __skb_dequeue(&rxq)) != NULL) {
643 664
644 if ((1 << vif->meta[npo.meta_cons].gso_type) & 665 if ((1 << queue->meta[npo.meta_cons].gso_type) &
645 vif->gso_prefix_mask) { 666 queue->vif->gso_prefix_mask) {
646 resp = RING_GET_RESPONSE(&vif->rx, 667 resp = RING_GET_RESPONSE(&queue->rx,
647 vif->rx.rsp_prod_pvt++); 668 queue->rx.rsp_prod_pvt++);
648 669
649 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; 670 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
650 671
651 resp->offset = vif->meta[npo.meta_cons].gso_size; 672 resp->offset = queue->meta[npo.meta_cons].gso_size;
652 resp->id = vif->meta[npo.meta_cons].id; 673 resp->id = queue->meta[npo.meta_cons].id;
653 resp->status = XENVIF_RX_CB(skb)->meta_slots_used; 674 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
654 675
655 npo.meta_cons++; 676 npo.meta_cons++;
@@ -657,10 +678,10 @@ static void xenvif_rx_action(struct xenvif *vif)
657 } 678 }
658 679
659 680
660 vif->dev->stats.tx_bytes += skb->len; 681 queue->stats.tx_bytes += skb->len;
661 vif->dev->stats.tx_packets++; 682 queue->stats.tx_packets++;
662 683
663 status = xenvif_check_gop(vif, 684 status = xenvif_check_gop(queue->vif,
664 XENVIF_RX_CB(skb)->meta_slots_used, 685 XENVIF_RX_CB(skb)->meta_slots_used,
665 &npo); 686 &npo);
666 687
@@ -676,22 +697,22 @@ static void xenvif_rx_action(struct xenvif *vif)
676 flags |= XEN_NETRXF_data_validated; 697 flags |= XEN_NETRXF_data_validated;
677 698
678 offset = 0; 699 offset = 0;
679 resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, 700 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
680 status, offset, 701 status, offset,
681 vif->meta[npo.meta_cons].size, 702 queue->meta[npo.meta_cons].size,
682 flags); 703 flags);
683 704
684 if ((1 << vif->meta[npo.meta_cons].gso_type) & 705 if ((1 << queue->meta[npo.meta_cons].gso_type) &
685 vif->gso_mask) { 706 queue->vif->gso_mask) {
686 struct xen_netif_extra_info *gso = 707 struct xen_netif_extra_info *gso =
687 (struct xen_netif_extra_info *) 708 (struct xen_netif_extra_info *)
688 RING_GET_RESPONSE(&vif->rx, 709 RING_GET_RESPONSE(&queue->rx,
689 vif->rx.rsp_prod_pvt++); 710 queue->rx.rsp_prod_pvt++);
690 711
691 resp->flags |= XEN_NETRXF_extra_info; 712 resp->flags |= XEN_NETRXF_extra_info;
692 713
693 gso->u.gso.type = vif->meta[npo.meta_cons].gso_type; 714 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
694 gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; 715 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
695 gso->u.gso.pad = 0; 716 gso->u.gso.pad = 0;
696 gso->u.gso.features = 0; 717 gso->u.gso.features = 0;
697 718
@@ -699,11 +720,11 @@ static void xenvif_rx_action(struct xenvif *vif)
699 gso->flags = 0; 720 gso->flags = 0;
700 } 721 }
701 722
702 xenvif_add_frag_responses(vif, status, 723 xenvif_add_frag_responses(queue, status,
703 vif->meta + npo.meta_cons + 1, 724 queue->meta + npo.meta_cons + 1,
704 XENVIF_RX_CB(skb)->meta_slots_used); 725 XENVIF_RX_CB(skb)->meta_slots_used);
705 726
706 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 727 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
707 728
708 need_to_notify |= !!ret; 729 need_to_notify |= !!ret;
709 730
@@ -713,20 +734,20 @@ static void xenvif_rx_action(struct xenvif *vif)
713 734
714done: 735done:
715 if (need_to_notify) 736 if (need_to_notify)
716 notify_remote_via_irq(vif->rx_irq); 737 notify_remote_via_irq(queue->rx_irq);
717} 738}
718 739
719void xenvif_napi_schedule_or_enable_events(struct xenvif *vif) 740void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
720{ 741{
721 int more_to_do; 742 int more_to_do;
722 743
723 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); 744 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
724 745
725 if (more_to_do) 746 if (more_to_do)
726 napi_schedule(&vif->napi); 747 napi_schedule(&queue->napi);
727} 748}
728 749
729static void tx_add_credit(struct xenvif *vif) 750static void tx_add_credit(struct xenvif_queue *queue)
730{ 751{
731 unsigned long max_burst, max_credit; 752 unsigned long max_burst, max_credit;
732 753
@@ -734,55 +755,57 @@ static void tx_add_credit(struct xenvif *vif)
734 * Allow a burst big enough to transmit a jumbo packet of up to 128kB. 755 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
735 * Otherwise the interface can seize up due to insufficient credit. 756 * Otherwise the interface can seize up due to insufficient credit.
736 */ 757 */
737 max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; 758 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
738 max_burst = min(max_burst, 131072UL); 759 max_burst = min(max_burst, 131072UL);
739 max_burst = max(max_burst, vif->credit_bytes); 760 max_burst = max(max_burst, queue->credit_bytes);
740 761
741 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 762 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
742 max_credit = vif->remaining_credit + vif->credit_bytes; 763 max_credit = queue->remaining_credit + queue->credit_bytes;
743 if (max_credit < vif->remaining_credit) 764 if (max_credit < queue->remaining_credit)
744 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ 765 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
745 766
746 vif->remaining_credit = min(max_credit, max_burst); 767 queue->remaining_credit = min(max_credit, max_burst);
747} 768}
748 769
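tx_add_credit() tops up the rate-limiting credit but clamps the sum: with free-running unsigned arithmetic, remaining + chunk can wrap past zero, which would collapse the credit instead of saturating it. The clamp in isolation, with contrived values that actually wrap (the burst sizing from the ring request is elided):

#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned long credit_bytes = ULONG_MAX / 2 + 1024;     /* contrived */
        unsigned long remaining_credit = ULONG_MAX / 2 + 4096; /* contrived */
        unsigned long max_burst = 131072; /* 128kB jumbo-frame burst floor */
        unsigned long max_credit;

        /* as in tx_add_credit(): detect wrap of remaining + chunk and
         * clamp to ULONG_MAX rather than letting the credit collapse */
        max_credit = remaining_credit + credit_bytes;
        if (max_credit < remaining_credit)
                max_credit = ULONG_MAX;

        remaining_credit = max_credit < max_burst ? max_credit : max_burst;
        printf("remaining credit: %lu\n", remaining_credit);
        return 0;
}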
749static void tx_credit_callback(unsigned long data) 770static void tx_credit_callback(unsigned long data)
750{ 771{
751 struct xenvif *vif = (struct xenvif *)data; 772 struct xenvif_queue *queue = (struct xenvif_queue *)data;
752 tx_add_credit(vif); 773 tx_add_credit(queue);
753 xenvif_napi_schedule_or_enable_events(vif); 774 xenvif_napi_schedule_or_enable_events(queue);
754} 775}
755 776
756static void xenvif_tx_err(struct xenvif *vif, 777static void xenvif_tx_err(struct xenvif_queue *queue,
757 struct xen_netif_tx_request *txp, RING_IDX end) 778 struct xen_netif_tx_request *txp, RING_IDX end)
758{ 779{
759 RING_IDX cons = vif->tx.req_cons; 780 RING_IDX cons = queue->tx.req_cons;
760 unsigned long flags; 781 unsigned long flags;
761 782
762 do { 783 do {
763 spin_lock_irqsave(&vif->response_lock, flags); 784 spin_lock_irqsave(&queue->response_lock, flags);
764 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 785 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
765 spin_unlock_irqrestore(&vif->response_lock, flags); 786 spin_unlock_irqrestore(&queue->response_lock, flags);
766 if (cons == end) 787 if (cons == end)
767 break; 788 break;
768 txp = RING_GET_REQUEST(&vif->tx, cons++); 789 txp = RING_GET_REQUEST(&queue->tx, cons++);
769 } while (1); 790 } while (1);
770 vif->tx.req_cons = cons; 791 queue->tx.req_cons = cons;
771} 792}
772 793
773static void xenvif_fatal_tx_err(struct xenvif *vif) 794static void xenvif_fatal_tx_err(struct xenvif *vif)
774{ 795{
775 netdev_err(vif->dev, "fatal error; disabling device\n"); 796 netdev_err(vif->dev, "fatal error; disabling device\n");
776 vif->disabled = true; 797 vif->disabled = true;
777 xenvif_kick_thread(vif); 798 /* Disable the vif from queue 0's kthread */
799 if (vif->queues)
800 xenvif_kick_thread(&vif->queues[0]);
778} 801}
779 802
780static int xenvif_count_requests(struct xenvif *vif, 803static int xenvif_count_requests(struct xenvif_queue *queue,
781 struct xen_netif_tx_request *first, 804 struct xen_netif_tx_request *first,
782 struct xen_netif_tx_request *txp, 805 struct xen_netif_tx_request *txp,
783 int work_to_do) 806 int work_to_do)
784{ 807{
785 RING_IDX cons = vif->tx.req_cons; 808 RING_IDX cons = queue->tx.req_cons;
786 int slots = 0; 809 int slots = 0;
787 int drop_err = 0; 810 int drop_err = 0;
788 int more_data; 811 int more_data;
@@ -794,10 +817,10 @@ static int xenvif_count_requests(struct xenvif *vif,
794 struct xen_netif_tx_request dropped_tx = { 0 }; 817 struct xen_netif_tx_request dropped_tx = { 0 };
795 818
796 if (slots >= work_to_do) { 819 if (slots >= work_to_do) {
797 netdev_err(vif->dev, 820 netdev_err(queue->vif->dev,
798 "Asked for %d slots but exceeds this limit\n", 821 "Asked for %d slots but exceeds this limit\n",
799 work_to_do); 822 work_to_do);
800 xenvif_fatal_tx_err(vif); 823 xenvif_fatal_tx_err(queue->vif);
801 return -ENODATA; 824 return -ENODATA;
802 } 825 }
803 826
@@ -805,10 +828,10 @@ static int xenvif_count_requests(struct xenvif *vif,
805 * considered malicious. 828 * considered malicious.
806 */ 829 */
807 if (unlikely(slots >= fatal_skb_slots)) { 830 if (unlikely(slots >= fatal_skb_slots)) {
808 netdev_err(vif->dev, 831 netdev_err(queue->vif->dev,
809 "Malicious frontend using %d slots, threshold %u\n", 832 "Malicious frontend using %d slots, threshold %u\n",
810 slots, fatal_skb_slots); 833 slots, fatal_skb_slots);
811 xenvif_fatal_tx_err(vif); 834 xenvif_fatal_tx_err(queue->vif);
812 return -E2BIG; 835 return -E2BIG;
813 } 836 }
814 837
@@ -821,7 +844,7 @@ static int xenvif_count_requests(struct xenvif *vif,
821 */ 844 */
822 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { 845 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
823 if (net_ratelimit()) 846 if (net_ratelimit())
824 netdev_dbg(vif->dev, 847 netdev_dbg(queue->vif->dev,
825 "Too many slots (%d) exceeding limit (%d), dropping packet\n", 848 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
826 slots, XEN_NETBK_LEGACY_SLOTS_MAX); 849 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
827 drop_err = -E2BIG; 850 drop_err = -E2BIG;
@@ -830,7 +853,7 @@ static int xenvif_count_requests(struct xenvif *vif,
830 if (drop_err) 853 if (drop_err)
831 txp = &dropped_tx; 854 txp = &dropped_tx;
832 855
833 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), 856 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
834 sizeof(*txp)); 857 sizeof(*txp));
835 858
836 /* If the guest submitted a frame >= 64 KiB then 859 /* If the guest submitted a frame >= 64 KiB then
@@ -844,7 +867,7 @@ static int xenvif_count_requests(struct xenvif *vif,
844 */ 867 */
845 if (!drop_err && txp->size > first->size) { 868 if (!drop_err && txp->size > first->size) {
846 if (net_ratelimit()) 869 if (net_ratelimit())
847 netdev_dbg(vif->dev, 870 netdev_dbg(queue->vif->dev,
848 "Invalid tx request, slot size %u > remaining size %u\n", 871 "Invalid tx request, slot size %u > remaining size %u\n",
849 txp->size, first->size); 872 txp->size, first->size);
850 drop_err = -EIO; 873 drop_err = -EIO;
@@ -854,9 +877,9 @@ static int xenvif_count_requests(struct xenvif *vif,
854 slots++; 877 slots++;
855 878
856 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 879 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
857 netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", 880 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
858 txp->offset, txp->size); 881 txp->offset, txp->size);
859 xenvif_fatal_tx_err(vif); 882 xenvif_fatal_tx_err(queue->vif);
860 return -EINVAL; 883 return -EINVAL;
861 } 884 }
862 885
@@ -868,7 +891,7 @@ static int xenvif_count_requests(struct xenvif *vif,
868 } while (more_data); 891 } while (more_data);
869 892
870 if (drop_err) { 893 if (drop_err) {
871 xenvif_tx_err(vif, first, cons + slots); 894 xenvif_tx_err(queue, first, cons + slots);
872 return drop_err; 895 return drop_err;
873 } 896 }
874 897
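The checks in this function form a graduated policy: overrunning the caller's work budget or the fatal_skb_slots threshold disables the vif outright, while a slot count between XEN_NETBK_LEGACY_SLOTS_MAX and the fatal threshold only drops the packet, because old frontends can legitimately produce that many slots. A condensed standalone model of the tiering (the constant values mirror the driver defaults and are assumptions of this sketch):

    #include <errno.h>

    #define LEGACY_SLOTS_MAX 18                      /* assumed XEN_NETBK_LEGACY_SLOTS_MAX */
    #define FATAL_SLOTS      (LEGACY_SLOTS_MAX + 2)  /* assumed fatal_skb_slots default */

    /* <0: fatal, disable the vif; 0: drop the packet; >0: accept. */
    int classify_slot_count(int slots, int budget)
    {
            if (slots >= budget)
                    return -ENODATA;  /* claims more than we may consume */
            if (slots >= FATAL_SLOTS)
                    return -E2BIG;    /* treat the frontend as malicious */
            if (slots >= LEGACY_SLOTS_MAX)
                    return 0;         /* tolerated overrun: drop, not kill */
            return slots;
    }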
@@ -882,17 +905,17 @@ struct xenvif_tx_cb {
882 905
883#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) 906#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
884 907
885static inline void xenvif_tx_create_map_op(struct xenvif *vif, 908static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
886 u16 pending_idx, 909 u16 pending_idx,
887 struct xen_netif_tx_request *txp, 910 struct xen_netif_tx_request *txp,
888 struct gnttab_map_grant_ref *mop) 911 struct gnttab_map_grant_ref *mop)
889{ 912{
890 vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx]; 913 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
891 gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx), 914 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
892 GNTMAP_host_map | GNTMAP_readonly, 915 GNTMAP_host_map | GNTMAP_readonly,
893 txp->gref, vif->domid); 916 txp->gref, queue->vif->domid);
894 917
895 memcpy(&vif->pending_tx_info[pending_idx].req, txp, 918 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
896 sizeof(*txp)); 919 sizeof(*txp));
897} 920}
898 921
@@ -913,7 +936,7 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
913 return skb; 936 return skb;
914} 937}
915 938
916static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, 939static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
917 struct sk_buff *skb, 940 struct sk_buff *skb,
918 struct xen_netif_tx_request *txp, 941 struct xen_netif_tx_request *txp,
919 struct gnttab_map_grant_ref *gop) 942 struct gnttab_map_grant_ref *gop)
@@ -940,9 +963,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
940 963
941 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; 964 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
942 shinfo->nr_frags++, txp++, gop++) { 965 shinfo->nr_frags++, txp++, gop++) {
943 index = pending_index(vif->pending_cons++); 966 index = pending_index(queue->pending_cons++);
944 pending_idx = vif->pending_ring[index]; 967 pending_idx = queue->pending_ring[index];
945 xenvif_tx_create_map_op(vif, pending_idx, txp, gop); 968 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
946 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); 969 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
947 } 970 }
948 971
@@ -950,7 +973,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
950 struct sk_buff *nskb = xenvif_alloc_skb(0); 973 struct sk_buff *nskb = xenvif_alloc_skb(0);
951 if (unlikely(nskb == NULL)) { 974 if (unlikely(nskb == NULL)) {
952 if (net_ratelimit()) 975 if (net_ratelimit())
953 netdev_err(vif->dev, 976 netdev_err(queue->vif->dev,
954 "Can't allocate the frag_list skb.\n"); 977 "Can't allocate the frag_list skb.\n");
955 return NULL; 978 return NULL;
956 } 979 }
@@ -960,9 +983,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
960 983
961 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; 984 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
962 shinfo->nr_frags++, txp++, gop++) { 985 shinfo->nr_frags++, txp++, gop++) {
963 index = pending_index(vif->pending_cons++); 986 index = pending_index(queue->pending_cons++);
964 pending_idx = vif->pending_ring[index]; 987 pending_idx = queue->pending_ring[index];
965 xenvif_tx_create_map_op(vif, pending_idx, txp, gop); 988 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
966 frag_set_pending_idx(&frags[shinfo->nr_frags], 989 frag_set_pending_idx(&frags[shinfo->nr_frags],
967 pending_idx); 990 pending_idx);
968 } 991 }
@@ -973,34 +996,34 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
973 return gop; 996 return gop;
974} 997}
975 998
976static inline void xenvif_grant_handle_set(struct xenvif *vif, 999static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
977 u16 pending_idx, 1000 u16 pending_idx,
978 grant_handle_t handle) 1001 grant_handle_t handle)
979{ 1002{
980 if (unlikely(vif->grant_tx_handle[pending_idx] != 1003 if (unlikely(queue->grant_tx_handle[pending_idx] !=
981 NETBACK_INVALID_HANDLE)) { 1004 NETBACK_INVALID_HANDLE)) {
982 netdev_err(vif->dev, 1005 netdev_err(queue->vif->dev,
983 "Trying to overwrite active handle! pending_idx: %x\n", 1006 "Trying to overwrite active handle! pending_idx: %x\n",
984 pending_idx); 1007 pending_idx);
985 BUG(); 1008 BUG();
986 } 1009 }
987 vif->grant_tx_handle[pending_idx] = handle; 1010 queue->grant_tx_handle[pending_idx] = handle;
988} 1011}
989 1012
990static inline void xenvif_grant_handle_reset(struct xenvif *vif, 1013static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
991 u16 pending_idx) 1014 u16 pending_idx)
992{ 1015{
993 if (unlikely(vif->grant_tx_handle[pending_idx] == 1016 if (unlikely(queue->grant_tx_handle[pending_idx] ==
994 NETBACK_INVALID_HANDLE)) { 1017 NETBACK_INVALID_HANDLE)) {
995 netdev_err(vif->dev, 1018 netdev_err(queue->vif->dev,
996 "Trying to unmap invalid handle! pending_idx: %x\n", 1019 "Trying to unmap invalid handle! pending_idx: %x\n",
997 pending_idx); 1020 pending_idx);
998 BUG(); 1021 BUG();
999 } 1022 }
1000 vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; 1023 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
1001} 1024}
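These two helpers make the grant-handle bookkeeping self-checking: a slot must hold the invalid sentinel before a handle may be stored and a real handle before it may be cleared, so double-map and double-unmap bugs trip a BUG() at once instead of silently corrupting state. The same guard in miniature (standalone; INVALID and assert() stand in for NETBACK_INVALID_HANDLE and BUG()):

    #include <assert.h>
    #include <stdint.h>

    #define NSLOTS  256
    #define INVALID UINT32_MAX        /* sentinel: no handle mapped */

    static uint32_t handles[NSLOTS];

    void handles_init(void)
    {
            for (unsigned int i = 0; i < NSLOTS; i++)
                    handles[i] = INVALID;
    }

    void handle_set(unsigned int idx, uint32_t h)
    {
            assert(handles[idx] == INVALID);  /* catches a double map */
            handles[idx] = h;
    }

    void handle_reset(unsigned int idx)
    {
            assert(handles[idx] != INVALID);  /* catches a double unmap */
            handles[idx] = INVALID;
    }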
1002 1025
1003static int xenvif_tx_check_gop(struct xenvif *vif, 1026static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1004 struct sk_buff *skb, 1027 struct sk_buff *skb,
1005 struct gnttab_map_grant_ref **gopp_map, 1028 struct gnttab_map_grant_ref **gopp_map,
1006 struct gnttab_copy **gopp_copy) 1029 struct gnttab_copy **gopp_copy)
@@ -1017,12 +1040,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
1017 (*gopp_copy)++; 1040 (*gopp_copy)++;
1018 if (unlikely(err)) { 1041 if (unlikely(err)) {
1019 if (net_ratelimit()) 1042 if (net_ratelimit())
1020 netdev_dbg(vif->dev, 1043 netdev_dbg(queue->vif->dev,
1021 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", 1044 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
1022 (*gopp_copy)->status, 1045 (*gopp_copy)->status,
1023 pending_idx, 1046 pending_idx,
1024 (*gopp_copy)->source.u.ref); 1047 (*gopp_copy)->source.u.ref);
1025 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); 1048 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1026 } 1049 }
1027 1050
1028check_frags: 1051check_frags:
@@ -1035,24 +1058,24 @@ check_frags:
1035 newerr = gop_map->status; 1058 newerr = gop_map->status;
1036 1059
1037 if (likely(!newerr)) { 1060 if (likely(!newerr)) {
1038 xenvif_grant_handle_set(vif, 1061 xenvif_grant_handle_set(queue,
1039 pending_idx, 1062 pending_idx,
1040 gop_map->handle); 1063 gop_map->handle);
1041 /* Had a previous error? Invalidate this fragment. */ 1064 /* Had a previous error? Invalidate this fragment. */
1042 if (unlikely(err)) 1065 if (unlikely(err))
1043 xenvif_idx_unmap(vif, pending_idx); 1066 xenvif_idx_unmap(queue, pending_idx);
1044 continue; 1067 continue;
1045 } 1068 }
1046 1069
1047 /* Error on this fragment: respond to client with an error. */ 1070 /* Error on this fragment: respond to client with an error. */
1048 if (net_ratelimit()) 1071 if (net_ratelimit())
1049 netdev_dbg(vif->dev, 1072 netdev_dbg(queue->vif->dev,
1050 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", 1073 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1051 i, 1074 i,
1052 gop_map->status, 1075 gop_map->status,
1053 pending_idx, 1076 pending_idx,
1054 gop_map->ref); 1077 gop_map->ref);
1055 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); 1078 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1056 1079
1057 /* Not the first error? Preceding frags already invalidated. */ 1080 /* Not the first error? Preceding frags already invalidated. */
1058 if (err) 1081 if (err)
@@ -1060,7 +1083,7 @@ check_frags:
1060 /* First error: invalidate preceding fragments. */ 1083 /* First error: invalidate preceding fragments. */
1061 for (j = 0; j < i; j++) { 1084 for (j = 0; j < i; j++) {
1062 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1085 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1063 xenvif_idx_unmap(vif, pending_idx); 1086 xenvif_idx_unmap(queue, pending_idx);
1064 } 1087 }
1065 1088
1066 /* Remember the error: invalidate all subsequent fragments. */ 1089 /* Remember the error: invalidate all subsequent fragments. */
@@ -1084,7 +1107,7 @@ check_frags:
1084 shinfo = skb_shinfo(first_skb); 1107 shinfo = skb_shinfo(first_skb);
1085 for (j = 0; j < shinfo->nr_frags; j++) { 1108 for (j = 0; j < shinfo->nr_frags; j++) {
1086 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1109 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1087 xenvif_idx_unmap(vif, pending_idx); 1110 xenvif_idx_unmap(queue, pending_idx);
1088 } 1111 }
1089 } 1112 }
1090 1113
@@ -1092,7 +1115,7 @@ check_frags:
1092 return err; 1115 return err;
1093} 1116}
1094 1117
1095static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) 1118static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1096{ 1119{
1097 struct skb_shared_info *shinfo = skb_shinfo(skb); 1120 struct skb_shared_info *shinfo = skb_shinfo(skb);
1098 int nr_frags = shinfo->nr_frags; 1121 int nr_frags = shinfo->nr_frags;
@@ -1110,23 +1133,23 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1110 /* If this is not the first frag, chain it to the previous*/ 1133 /* If this is not the first frag, chain it to the previous*/
1111 if (prev_pending_idx == INVALID_PENDING_IDX) 1134 if (prev_pending_idx == INVALID_PENDING_IDX)
1112 skb_shinfo(skb)->destructor_arg = 1135 skb_shinfo(skb)->destructor_arg =
1113 &callback_param(vif, pending_idx); 1136 &callback_param(queue, pending_idx);
1114 else 1137 else
1115 callback_param(vif, prev_pending_idx).ctx = 1138 callback_param(queue, prev_pending_idx).ctx =
1116 &callback_param(vif, pending_idx); 1139 &callback_param(queue, pending_idx);
1117 1140
1118 callback_param(vif, pending_idx).ctx = NULL; 1141 callback_param(queue, pending_idx).ctx = NULL;
1119 prev_pending_idx = pending_idx; 1142 prev_pending_idx = pending_idx;
1120 1143
1121 txp = &vif->pending_tx_info[pending_idx].req; 1144 txp = &queue->pending_tx_info[pending_idx].req;
1122 page = virt_to_page(idx_to_kaddr(vif, pending_idx)); 1145 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1123 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); 1146 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1124 skb->len += txp->size; 1147 skb->len += txp->size;
1125 skb->data_len += txp->size; 1148 skb->data_len += txp->size;
1126 skb->truesize += txp->size; 1149 skb->truesize += txp->size;
1127 1150
1128 /* Take an extra reference to offset network stack's put_page */ 1151 /* Take an extra reference to offset network stack's put_page */
1129 get_page(vif->mmap_pages[pending_idx]); 1152 get_page(queue->mmap_pages[pending_idx]);
1130 } 1153 }
1131 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc 1154 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1132 * overlaps with "index", and "mapping" is not set. I think mapping 1155 * overlaps with "index", and "mapping" is not set. I think mapping
@@ -1136,33 +1159,33 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1136 skb->pfmemalloc = false; 1159 skb->pfmemalloc = false;
1137} 1160}
1138 1161
1139static int xenvif_get_extras(struct xenvif *vif, 1162static int xenvif_get_extras(struct xenvif_queue *queue,
1140 struct xen_netif_extra_info *extras, 1163 struct xen_netif_extra_info *extras,
1141 int work_to_do) 1164 int work_to_do)
1142{ 1165{
1143 struct xen_netif_extra_info extra; 1166 struct xen_netif_extra_info extra;
1144 RING_IDX cons = vif->tx.req_cons; 1167 RING_IDX cons = queue->tx.req_cons;
1145 1168
1146 do { 1169 do {
1147 if (unlikely(work_to_do-- <= 0)) { 1170 if (unlikely(work_to_do-- <= 0)) {
1148 netdev_err(vif->dev, "Missing extra info\n"); 1171 netdev_err(queue->vif->dev, "Missing extra info\n");
1149 xenvif_fatal_tx_err(vif); 1172 xenvif_fatal_tx_err(queue->vif);
1150 return -EBADR; 1173 return -EBADR;
1151 } 1174 }
1152 1175
1153 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), 1176 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
1154 sizeof(extra)); 1177 sizeof(extra));
1155 if (unlikely(!extra.type || 1178 if (unlikely(!extra.type ||
1156 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1179 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1157 vif->tx.req_cons = ++cons; 1180 queue->tx.req_cons = ++cons;
1158 netdev_err(vif->dev, 1181 netdev_err(queue->vif->dev,
1159 "Invalid extra type: %d\n", extra.type); 1182 "Invalid extra type: %d\n", extra.type);
1160 xenvif_fatal_tx_err(vif); 1183 xenvif_fatal_tx_err(queue->vif);
1161 return -EINVAL; 1184 return -EINVAL;
1162 } 1185 }
1163 1186
1164 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); 1187 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1165 vif->tx.req_cons = ++cons; 1188 queue->tx.req_cons = ++cons;
1166 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); 1189 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1167 1190
1168 return work_to_do; 1191 return work_to_do;
@@ -1197,7 +1220,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
1197 return 0; 1220 return 0;
1198} 1221}
1199 1222
1200static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) 1223static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1201{ 1224{
1202 bool recalculate_partial_csum = false; 1225 bool recalculate_partial_csum = false;
1203 1226
@@ -1207,7 +1230,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1207 * recalculate the partial checksum. 1230 * recalculate the partial checksum.
1208 */ 1231 */
1209 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 1232 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1210 vif->rx_gso_checksum_fixup++; 1233 queue->stats.rx_gso_checksum_fixup++;
1211 skb->ip_summed = CHECKSUM_PARTIAL; 1234 skb->ip_summed = CHECKSUM_PARTIAL;
1212 recalculate_partial_csum = true; 1235 recalculate_partial_csum = true;
1213 } 1236 }
@@ -1219,31 +1242,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1219 return skb_checksum_setup(skb, recalculate_partial_csum); 1242 return skb_checksum_setup(skb, recalculate_partial_csum);
1220} 1243}
1221 1244
1222static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) 1245static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1223{ 1246{
1224 u64 now = get_jiffies_64(); 1247 u64 now = get_jiffies_64();
1225 u64 next_credit = vif->credit_window_start + 1248 u64 next_credit = queue->credit_window_start +
1226 msecs_to_jiffies(vif->credit_usec / 1000); 1249 msecs_to_jiffies(queue->credit_usec / 1000);
1227 1250
1228 /* Timer could already be pending in rare cases. */ 1251 /* Timer could already be pending in rare cases. */
1229 if (timer_pending(&vif->credit_timeout)) 1252 if (timer_pending(&queue->credit_timeout))
1230 return true; 1253 return true;
1231 1254
1232 /* Passed the point where we can replenish credit? */ 1255 /* Passed the point where we can replenish credit? */
1233 if (time_after_eq64(now, next_credit)) { 1256 if (time_after_eq64(now, next_credit)) {
1234 vif->credit_window_start = now; 1257 queue->credit_window_start = now;
1235 tx_add_credit(vif); 1258 tx_add_credit(queue);
1236 } 1259 }
1237 1260
1238 /* Still too big to send right now? Set a callback. */ 1261 /* Still too big to send right now? Set a callback. */
1239 if (size > vif->remaining_credit) { 1262 if (size > queue->remaining_credit) {
1240 vif->credit_timeout.data = 1263 queue->credit_timeout.data =
1241 (unsigned long)vif; 1264 (unsigned long)queue;
1242 vif->credit_timeout.function = 1265 queue->credit_timeout.function =
1243 tx_credit_callback; 1266 tx_credit_callback;
1244 mod_timer(&vif->credit_timeout, 1267 mod_timer(&queue->credit_timeout,
1245 next_credit); 1268 next_credit);
1246 vif->credit_window_start = next_credit; 1269 queue->credit_window_start = next_credit;
1247 1270
1248 return true; 1271 return true;
1249 } 1272 }
@@ -1251,16 +1274,16 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1251 return false; 1274 return false;
1252} 1275}
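Together with tx_add_credit() above, this implements a token bucket: credit_window_start marks when the current window opened, an oversized packet arms a timer for the next window instead of busy-waiting, and the actual charge (remaining_credit -= size) is made by the caller. A self-contained model of the decision, with jiffies and the kernel timer replaced by a plain 64-bit clock and a simple refill in place of the clamped one (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct credit {
            uint64_t window_start;     /* when the current window opened */
            uint64_t window_len;       /* replenish period */
            unsigned long remaining;   /* bytes left in this window */
            unsigned long per_window;  /* bytes granted per window */
    };

    /* True if 'size' bytes must wait for the next replenish window. */
    bool credit_exceeded(struct credit *c, uint64_t now, unsigned long size)
    {
            uint64_t next = c->window_start + c->window_len;

            if (now >= next) {               /* window elapsed: refill */
                    c->window_start = now;
                    c->remaining = c->per_window;
            }
            if (size > c->remaining) {
                    c->window_start = next;  /* defer to the next window */
                    return true;             /* caller arms a wakeup timer */
            }
            return false;                    /* caller charges c->remaining */
    }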
1253 1276
1254static void xenvif_tx_build_gops(struct xenvif *vif, 1277static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1255 int budget, 1278 int budget,
1256 unsigned *copy_ops, 1279 unsigned *copy_ops,
1257 unsigned *map_ops) 1280 unsigned *map_ops)
1258{ 1281{
1259 struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop; 1282 struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
1260 struct sk_buff *skb; 1283 struct sk_buff *skb;
1261 int ret; 1284 int ret;
1262 1285
1263 while (skb_queue_len(&vif->tx_queue) < budget) { 1286 while (skb_queue_len(&queue->tx_queue) < budget) {
1264 struct xen_netif_tx_request txreq; 1287 struct xen_netif_tx_request txreq;
1265 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1288 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1266 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; 1289 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1270,69 +1293,69 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1270 unsigned int data_len; 1293 unsigned int data_len;
1271 pending_ring_idx_t index; 1294 pending_ring_idx_t index;
1272 1295
1273 if (vif->tx.sring->req_prod - vif->tx.req_cons > 1296 if (queue->tx.sring->req_prod - queue->tx.req_cons >
1274 XEN_NETIF_TX_RING_SIZE) { 1297 XEN_NETIF_TX_RING_SIZE) {
1275 netdev_err(vif->dev, 1298 netdev_err(queue->vif->dev,
1276 "Impossible number of requests. " 1299 "Impossible number of requests. "
1277 "req_prod %d, req_cons %d, size %ld\n", 1300 "req_prod %d, req_cons %d, size %ld\n",
1278 vif->tx.sring->req_prod, vif->tx.req_cons, 1301 queue->tx.sring->req_prod, queue->tx.req_cons,
1279 XEN_NETIF_TX_RING_SIZE); 1302 XEN_NETIF_TX_RING_SIZE);
1280 xenvif_fatal_tx_err(vif); 1303 xenvif_fatal_tx_err(queue->vif);
1281 break; 1304 break;
1282 } 1305 }
1283 1306
1284 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); 1307 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1285 if (!work_to_do) 1308 if (!work_to_do)
1286 break; 1309 break;
1287 1310
1288 idx = vif->tx.req_cons; 1311 idx = queue->tx.req_cons;
1289 rmb(); /* Ensure that we see the request before we copy it. */ 1312 rmb(); /* Ensure that we see the request before we copy it. */
1290 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); 1313 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
1291 1314
1292 /* Credit-based scheduling. */ 1315 /* Credit-based scheduling. */
1293 if (txreq.size > vif->remaining_credit && 1316 if (txreq.size > queue->remaining_credit &&
1294 tx_credit_exceeded(vif, txreq.size)) 1317 tx_credit_exceeded(queue, txreq.size))
1295 break; 1318 break;
1296 1319
1297 vif->remaining_credit -= txreq.size; 1320 queue->remaining_credit -= txreq.size;
1298 1321
1299 work_to_do--; 1322 work_to_do--;
1300 vif->tx.req_cons = ++idx; 1323 queue->tx.req_cons = ++idx;
1301 1324
1302 memset(extras, 0, sizeof(extras)); 1325 memset(extras, 0, sizeof(extras));
1303 if (txreq.flags & XEN_NETTXF_extra_info) { 1326 if (txreq.flags & XEN_NETTXF_extra_info) {
1304 work_to_do = xenvif_get_extras(vif, extras, 1327 work_to_do = xenvif_get_extras(queue, extras,
1305 work_to_do); 1328 work_to_do);
1306 idx = vif->tx.req_cons; 1329 idx = queue->tx.req_cons;
1307 if (unlikely(work_to_do < 0)) 1330 if (unlikely(work_to_do < 0))
1308 break; 1331 break;
1309 } 1332 }
1310 1333
1311 ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do); 1334 ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
1312 if (unlikely(ret < 0)) 1335 if (unlikely(ret < 0))
1313 break; 1336 break;
1314 1337
1315 idx += ret; 1338 idx += ret;
1316 1339
1317 if (unlikely(txreq.size < ETH_HLEN)) { 1340 if (unlikely(txreq.size < ETH_HLEN)) {
1318 netdev_dbg(vif->dev, 1341 netdev_dbg(queue->vif->dev,
1319 "Bad packet size: %d\n", txreq.size); 1342 "Bad packet size: %d\n", txreq.size);
1320 xenvif_tx_err(vif, &txreq, idx); 1343 xenvif_tx_err(queue, &txreq, idx);
1321 break; 1344 break;
1322 } 1345 }
1323 1346
1324 /* No crossing a page as the payload mustn't fragment. */ 1347 /* No crossing a page as the payload mustn't fragment. */
1325 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { 1348 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1326 netdev_err(vif->dev, 1349 netdev_err(queue->vif->dev,
1327 "txreq.offset: %x, size: %u, end: %lu\n", 1350 "txreq.offset: %x, size: %u, end: %lu\n",
1328 txreq.offset, txreq.size, 1351 txreq.offset, txreq.size,
1329 (txreq.offset&~PAGE_MASK) + txreq.size); 1352 (txreq.offset&~PAGE_MASK) + txreq.size);
1330 xenvif_fatal_tx_err(vif); 1353 xenvif_fatal_tx_err(queue->vif);
1331 break; 1354 break;
1332 } 1355 }
1333 1356
1334 index = pending_index(vif->pending_cons); 1357 index = pending_index(queue->pending_cons);
1335 pending_idx = vif->pending_ring[index]; 1358 pending_idx = queue->pending_ring[index];
1336 1359
1337 data_len = (txreq.size > PKT_PROT_LEN && 1360 data_len = (txreq.size > PKT_PROT_LEN &&
1338 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? 1361 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1340,9 +1363,9 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1340 1363
1341 skb = xenvif_alloc_skb(data_len); 1364 skb = xenvif_alloc_skb(data_len);
1342 if (unlikely(skb == NULL)) { 1365 if (unlikely(skb == NULL)) {
1343 netdev_dbg(vif->dev, 1366 netdev_dbg(queue->vif->dev,
1344 "Can't allocate a skb in start_xmit.\n"); 1367 "Can't allocate a skb in start_xmit.\n");
1345 xenvif_tx_err(vif, &txreq, idx); 1368 xenvif_tx_err(queue, &txreq, idx);
1346 break; 1369 break;
1347 } 1370 }
1348 1371
@@ -1350,7 +1373,7 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1350 struct xen_netif_extra_info *gso; 1373 struct xen_netif_extra_info *gso;
1351 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1374 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1352 1375
1353 if (xenvif_set_skb_gso(vif, skb, gso)) { 1376 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1354 /* Failure in xenvif_set_skb_gso is fatal. */ 1377 /* Failure in xenvif_set_skb_gso is fatal. */
1355 kfree_skb(skb); 1378 kfree_skb(skb);
1356 break; 1379 break;
@@ -1360,18 +1383,18 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1360 XENVIF_TX_CB(skb)->pending_idx = pending_idx; 1383 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1361 1384
1362 __skb_put(skb, data_len); 1385 __skb_put(skb, data_len);
1363 vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; 1386 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1364 vif->tx_copy_ops[*copy_ops].source.domid = vif->domid; 1387 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1365 vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset; 1388 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1366 1389
1367 vif->tx_copy_ops[*copy_ops].dest.u.gmfn = 1390 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1368 virt_to_mfn(skb->data); 1391 virt_to_mfn(skb->data);
1369 vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; 1392 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1370 vif->tx_copy_ops[*copy_ops].dest.offset = 1393 queue->tx_copy_ops[*copy_ops].dest.offset =
1371 offset_in_page(skb->data); 1394 offset_in_page(skb->data);
1372 1395
1373 vif->tx_copy_ops[*copy_ops].len = data_len; 1396 queue->tx_copy_ops[*copy_ops].len = data_len;
1374 vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; 1397 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1375 1398
1376 (*copy_ops)++; 1399 (*copy_ops)++;
1377 1400
@@ -1380,42 +1403,42 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1380 skb_shinfo(skb)->nr_frags++; 1403 skb_shinfo(skb)->nr_frags++;
1381 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1404 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1382 pending_idx); 1405 pending_idx);
1383 xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop); 1406 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
1384 gop++; 1407 gop++;
1385 } else { 1408 } else {
1386 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1409 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1387 INVALID_PENDING_IDX); 1410 INVALID_PENDING_IDX);
1388 memcpy(&vif->pending_tx_info[pending_idx].req, &txreq, 1411 memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
1389 sizeof(txreq)); 1412 sizeof(txreq));
1390 } 1413 }
1391 1414
1392 vif->pending_cons++; 1415 queue->pending_cons++;
1393 1416
1394 request_gop = xenvif_get_requests(vif, skb, txfrags, gop); 1417 request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
1395 if (request_gop == NULL) { 1418 if (request_gop == NULL) {
1396 kfree_skb(skb); 1419 kfree_skb(skb);
1397 xenvif_tx_err(vif, &txreq, idx); 1420 xenvif_tx_err(queue, &txreq, idx);
1398 break; 1421 break;
1399 } 1422 }
1400 gop = request_gop; 1423 gop = request_gop;
1401 1424
1402 __skb_queue_tail(&vif->tx_queue, skb); 1425 __skb_queue_tail(&queue->tx_queue, skb);
1403 1426
1404 vif->tx.req_cons = idx; 1427 queue->tx.req_cons = idx;
1405 1428
1406 if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) || 1429 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1407 (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops))) 1430 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1408 break; 1431 break;
1409 } 1432 }
1410 1433
1411 (*map_ops) = gop - vif->tx_map_ops; 1434 (*map_ops) = gop - queue->tx_map_ops;
1412 return; 1435 return;
1413} 1436}
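Each packet is built in two parts: up to PKT_PROT_LEN bytes are grant-copied into the linear skb head, so the protocol headers are always contiguous and locally owned, and anything beyond that is grant-mapped as a zerocopy frag. Just the split decision, standalone (the PKT_PROT_LEN value is an assumption of this sketch):

    #include <stdbool.h>

    #define PKT_PROT_LEN 128U   /* assumed header budget */

    struct tx_split {
            unsigned int copy_len;  /* grant-copied into the skb head */
            bool map_remainder;     /* rest becomes a grant-mapped frag */
    };

    struct tx_split split_request(unsigned int size, int slots, int max_slots)
    {
            struct tx_split s;

            if (size > PKT_PROT_LEN && slots < max_slots)
                    s.copy_len = PKT_PROT_LEN;  /* copy headers only */
            else
                    s.copy_len = size;          /* small packet: copy it all */
            s.map_remainder = s.copy_len < size;
            return s;
    }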
1414 1437
1415/* Consolidate skb with a frag_list into a brand new one with local pages on 1438/* Consolidate skb with a frag_list into a brand new one with local pages on
1416 * frags. Returns 0 or -ENOMEM if can't allocate new pages. 1439 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1417 */ 1440 */
1418static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb) 1441static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1419{ 1442{
1420 unsigned int offset = skb_headlen(skb); 1443 unsigned int offset = skb_headlen(skb);
1421 skb_frag_t frags[MAX_SKB_FRAGS]; 1444 skb_frag_t frags[MAX_SKB_FRAGS];
@@ -1423,10 +1446,10 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1423 struct ubuf_info *uarg; 1446 struct ubuf_info *uarg;
1424 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; 1447 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1425 1448
1426 vif->tx_zerocopy_sent += 2; 1449 queue->stats.tx_zerocopy_sent += 2;
1427 vif->tx_frag_overflow++; 1450 queue->stats.tx_frag_overflow++;
1428 1451
1429 xenvif_fill_frags(vif, nskb); 1452 xenvif_fill_frags(queue, nskb);
1430 /* Subtract frags size, we will correct it later */ 1453 /* Subtract frags size, we will correct it later */
1431 skb->truesize -= skb->data_len; 1454 skb->truesize -= skb->data_len;
1432 skb->len += nskb->len; 1455 skb->len += nskb->len;
@@ -1478,37 +1501,37 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1478 return 0; 1501 return 0;
1479} 1502}
1480 1503
1481static int xenvif_tx_submit(struct xenvif *vif) 1504static int xenvif_tx_submit(struct xenvif_queue *queue)
1482{ 1505{
1483 struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops; 1506 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1484 struct gnttab_copy *gop_copy = vif->tx_copy_ops; 1507 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1485 struct sk_buff *skb; 1508 struct sk_buff *skb;
1486 int work_done = 0; 1509 int work_done = 0;
1487 1510
1488 while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { 1511 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1489 struct xen_netif_tx_request *txp; 1512 struct xen_netif_tx_request *txp;
1490 u16 pending_idx; 1513 u16 pending_idx;
1491 unsigned data_len; 1514 unsigned data_len;
1492 1515
1493 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1516 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1494 txp = &vif->pending_tx_info[pending_idx].req; 1517 txp = &queue->pending_tx_info[pending_idx].req;
1495 1518
1496 /* Check the remap error code. */ 1519 /* Check the remap error code. */
1497 if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) { 1520 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1498 skb_shinfo(skb)->nr_frags = 0; 1521 skb_shinfo(skb)->nr_frags = 0;
1499 kfree_skb(skb); 1522 kfree_skb(skb);
1500 continue; 1523 continue;
1501 } 1524 }
1502 1525
1503 data_len = skb->len; 1526 data_len = skb->len;
1504 callback_param(vif, pending_idx).ctx = NULL; 1527 callback_param(queue, pending_idx).ctx = NULL;
1505 if (data_len < txp->size) { 1528 if (data_len < txp->size) {
1506 /* Append the packet payload as a fragment. */ 1529 /* Append the packet payload as a fragment. */
1507 txp->offset += data_len; 1530 txp->offset += data_len;
1508 txp->size -= data_len; 1531 txp->size -= data_len;
1509 } else { 1532 } else {
1510 /* Schedule a response immediately. */ 1533 /* Schedule a response immediately. */
1511 xenvif_idx_release(vif, pending_idx, 1534 xenvif_idx_release(queue, pending_idx,
1512 XEN_NETIF_RSP_OKAY); 1535 XEN_NETIF_RSP_OKAY);
1513 } 1536 }
1514 1537
@@ -1517,12 +1540,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
1517 else if (txp->flags & XEN_NETTXF_data_validated) 1540 else if (txp->flags & XEN_NETTXF_data_validated)
1518 skb->ip_summed = CHECKSUM_UNNECESSARY; 1541 skb->ip_summed = CHECKSUM_UNNECESSARY;
1519 1542
1520 xenvif_fill_frags(vif, skb); 1543 xenvif_fill_frags(queue, skb);
1521 1544
1522 if (unlikely(skb_has_frag_list(skb))) { 1545 if (unlikely(skb_has_frag_list(skb))) {
1523 if (xenvif_handle_frag_list(vif, skb)) { 1546 if (xenvif_handle_frag_list(queue, skb)) {
1524 if (net_ratelimit()) 1547 if (net_ratelimit())
1525 netdev_err(vif->dev, 1548 netdev_err(queue->vif->dev,
1526 "Not enough memory to consolidate frag_list!\n"); 1549 "Not enough memory to consolidate frag_list!\n");
1527 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1550 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1528 kfree_skb(skb); 1551 kfree_skb(skb);
@@ -1535,12 +1558,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
1535 __pskb_pull_tail(skb, target - skb_headlen(skb)); 1558 __pskb_pull_tail(skb, target - skb_headlen(skb));
1536 } 1559 }
1537 1560
1538 skb->dev = vif->dev; 1561 skb->dev = queue->vif->dev;
1539 skb->protocol = eth_type_trans(skb, skb->dev); 1562 skb->protocol = eth_type_trans(skb, skb->dev);
1540 skb_reset_network_header(skb); 1563 skb_reset_network_header(skb);
1541 1564
1542 if (checksum_setup(vif, skb)) { 1565 if (checksum_setup(queue, skb)) {
1543 netdev_dbg(vif->dev, 1566 netdev_dbg(queue->vif->dev,
1544 "Can't setup checksum in net_tx_action\n"); 1567 "Can't setup checksum in net_tx_action\n");
1545 /* We have to set this flag to trigger the callback */ 1568 /* We have to set this flag to trigger the callback */
1546 if (skb_shinfo(skb)->destructor_arg) 1569 if (skb_shinfo(skb)->destructor_arg)
@@ -1565,8 +1588,8 @@ static int xenvif_tx_submit(struct xenvif *vif)
1565 DIV_ROUND_UP(skb->len - hdrlen, mss); 1588 DIV_ROUND_UP(skb->len - hdrlen, mss);
1566 } 1589 }
1567 1590
1568 vif->dev->stats.rx_bytes += skb->len; 1591 queue->stats.rx_bytes += skb->len;
1569 vif->dev->stats.rx_packets++; 1592 queue->stats.rx_packets++;
1570 1593
1571 work_done++; 1594 work_done++;
1572 1595
@@ -1577,7 +1600,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
1577 */ 1600 */
1578 if (skb_shinfo(skb)->destructor_arg) { 1601 if (skb_shinfo(skb)->destructor_arg) {
1579 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1602 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1580 vif->tx_zerocopy_sent++; 1603 queue->stats.tx_zerocopy_sent++;
1581 } 1604 }
1582 1605
1583 netif_receive_skb(skb); 1606 netif_receive_skb(skb);
@@ -1590,47 +1613,47 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1590{ 1613{
1591 unsigned long flags; 1614 unsigned long flags;
1592 pending_ring_idx_t index; 1615 pending_ring_idx_t index;
1593 struct xenvif *vif = ubuf_to_vif(ubuf); 1616 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1594 1617
1595 /* This is the only place where we grab this lock, to protect callbacks 1618 /* This is the only place where we grab this lock, to protect callbacks
1596 * from each other. 1619 * from each other.
1597 */ 1620 */
1598 spin_lock_irqsave(&vif->callback_lock, flags); 1621 spin_lock_irqsave(&queue->callback_lock, flags);
1599 do { 1622 do {
1600 u16 pending_idx = ubuf->desc; 1623 u16 pending_idx = ubuf->desc;
1601 ubuf = (struct ubuf_info *) ubuf->ctx; 1624 ubuf = (struct ubuf_info *) ubuf->ctx;
1602 BUG_ON(vif->dealloc_prod - vif->dealloc_cons >= 1625 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1603 MAX_PENDING_REQS); 1626 MAX_PENDING_REQS);
1604 index = pending_index(vif->dealloc_prod); 1627 index = pending_index(queue->dealloc_prod);
1605 vif->dealloc_ring[index] = pending_idx; 1628 queue->dealloc_ring[index] = pending_idx;
1606 /* Sync with xenvif_tx_dealloc_action: 1629 /* Sync with xenvif_tx_dealloc_action:
1607 * insert idx then incr producer. 1630 * insert idx then incr producer.
1608 */ 1631 */
1609 smp_wmb(); 1632 smp_wmb();
1610 vif->dealloc_prod++; 1633 queue->dealloc_prod++;
1611 } while (ubuf); 1634 } while (ubuf);
1612 wake_up(&vif->dealloc_wq); 1635 wake_up(&queue->dealloc_wq);
1613 spin_unlock_irqrestore(&vif->callback_lock, flags); 1636 spin_unlock_irqrestore(&queue->callback_lock, flags);
1614 1637
1615 if (likely(zerocopy_success)) 1638 if (likely(zerocopy_success))
1616 vif->tx_zerocopy_success++; 1639 queue->stats.tx_zerocopy_success++;
1617 else 1640 else
1618 vif->tx_zerocopy_fail++; 1641 queue->stats.tx_zerocopy_fail++;
1619} 1642}
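This callback is the producer half of a single-producer/single-consumer ring: the pending index is stored first, smp_wmb() orders that store before the dealloc_prod increment, and the smp_rmb() in xenvif_tx_dealloc_action() below pairs with it on the consumer side. The same ordering expressed with C11 release/acquire atomics (a sketch of the pattern, not the kernel primitives; the caller is assumed to guarantee the ring never overflows, as the driver's BUG_ON does):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SIZE 256               /* must be a power of two */

    struct spsc_ring {
            uint16_t slots[RING_SIZE];
            _Atomic unsigned int prod;  /* written by the producer only */
            _Atomic unsigned int cons;  /* written by the consumer only */
    };

    /* Producer: store the slot, then publish with a release store,
     * playing the role of smp_wmb() followed by dealloc_prod++. */
    void ring_push(struct spsc_ring *r, uint16_t idx)
    {
            unsigned int p = atomic_load_explicit(&r->prod, memory_order_relaxed);

            r->slots[p & (RING_SIZE - 1)] = idx;
            atomic_store_explicit(&r->prod, p + 1, memory_order_release);
    }

    /* Consumer: acquire-load the producer index (the smp_rmb() side);
     * afterwards every slot published before it is safe to read. */
    bool ring_pop(struct spsc_ring *r, uint16_t *idx)
    {
            unsigned int c = atomic_load_explicit(&r->cons, memory_order_relaxed);
            unsigned int p = atomic_load_explicit(&r->prod, memory_order_acquire);

            if (c == p)
                    return false;  /* empty */
            *idx = r->slots[c & (RING_SIZE - 1)];
            atomic_store_explicit(&r->cons, c + 1, memory_order_release);
            return true;
    }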
1620 1643
1621static inline void xenvif_tx_dealloc_action(struct xenvif *vif) 1644static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1622{ 1645{
1623 struct gnttab_unmap_grant_ref *gop; 1646 struct gnttab_unmap_grant_ref *gop;
1624 pending_ring_idx_t dc, dp; 1647 pending_ring_idx_t dc, dp;
1625 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; 1648 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1626 unsigned int i = 0; 1649 unsigned int i = 0;
1627 1650
1628 dc = vif->dealloc_cons; 1651 dc = queue->dealloc_cons;
1629 gop = vif->tx_unmap_ops; 1652 gop = queue->tx_unmap_ops;
1630 1653
1631 /* Free up any grants we have finished using */ 1654 /* Free up any grants we have finished using */
1632 do { 1655 do {
1633 dp = vif->dealloc_prod; 1656 dp = queue->dealloc_prod;
1634 1657
1635 /* Ensure we see all indices enqueued by all 1658 /* Ensure we see all indices enqueued by all
1636 * xenvif_zerocopy_callback(). 1659 * xenvif_zerocopy_callback().
@@ -1638,38 +1661,38 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1638 smp_rmb(); 1661 smp_rmb();
1639 1662
1640 while (dc != dp) { 1663 while (dc != dp) {
1641 BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS); 1664 BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
1642 pending_idx = 1665 pending_idx =
1643 vif->dealloc_ring[pending_index(dc++)]; 1666 queue->dealloc_ring[pending_index(dc++)];
1644 1667
1645 pending_idx_release[gop-vif->tx_unmap_ops] = 1668 pending_idx_release[gop-queue->tx_unmap_ops] =
1646 pending_idx; 1669 pending_idx;
1647 vif->pages_to_unmap[gop-vif->tx_unmap_ops] = 1670 queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
1648 vif->mmap_pages[pending_idx]; 1671 queue->mmap_pages[pending_idx];
1649 gnttab_set_unmap_op(gop, 1672 gnttab_set_unmap_op(gop,
1650 idx_to_kaddr(vif, pending_idx), 1673 idx_to_kaddr(queue, pending_idx),
1651 GNTMAP_host_map, 1674 GNTMAP_host_map,
1652 vif->grant_tx_handle[pending_idx]); 1675 queue->grant_tx_handle[pending_idx]);
1653 xenvif_grant_handle_reset(vif, pending_idx); 1676 xenvif_grant_handle_reset(queue, pending_idx);
1654 ++gop; 1677 ++gop;
1655 } 1678 }
1656 1679
1657 } while (dp != vif->dealloc_prod); 1680 } while (dp != queue->dealloc_prod);
1658 1681
1659 vif->dealloc_cons = dc; 1682 queue->dealloc_cons = dc;
1660 1683
1661 if (gop - vif->tx_unmap_ops > 0) { 1684 if (gop - queue->tx_unmap_ops > 0) {
1662 int ret; 1685 int ret;
1663 ret = gnttab_unmap_refs(vif->tx_unmap_ops, 1686 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1664 NULL, 1687 NULL,
1665 vif->pages_to_unmap, 1688 queue->pages_to_unmap,
1666 gop - vif->tx_unmap_ops); 1689 gop - queue->tx_unmap_ops);
1667 if (ret) { 1690 if (ret) {
1668 netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n", 1691 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
1669 gop - vif->tx_unmap_ops, ret); 1692 gop - queue->tx_unmap_ops, ret);
1670 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) { 1693 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1671 if (gop[i].status != GNTST_okay) 1694 if (gop[i].status != GNTST_okay)
1672 netdev_err(vif->dev, 1695 netdev_err(queue->vif->dev,
1673 " host_addr: %llx handle: %x status: %d\n", 1696 " host_addr: %llx handle: %x status: %d\n",
1674 gop[i].host_addr, 1697 gop[i].host_addr,
1675 gop[i].handle, 1698 gop[i].handle,
@@ -1679,91 +1702,91 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1679 } 1702 }
1680 } 1703 }
1681 1704
1682 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) 1705 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1683 xenvif_idx_release(vif, pending_idx_release[i], 1706 xenvif_idx_release(queue, pending_idx_release[i],
1684 XEN_NETIF_RSP_OKAY); 1707 XEN_NETIF_RSP_OKAY);
1685} 1708}
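The dealloc thread drains that ring into a flat array and issues one gnttab_unmap_refs() for the whole batch, paying the hypercall cost once per wakeup rather than once per slot. The drain-then-flush shape, reusing ring_pop() from the sketch above (flush is a hypothetical stand-in for the batched unmap call):

    /* Drain up to 'cap' pending indices, then flush them in one call,
     * mirroring the single batched unmap above. */
    unsigned int drain_and_flush(struct spsc_ring *r,
                                 uint16_t *batch, unsigned int cap,
                                 void (*flush)(const uint16_t *, unsigned int))
    {
            unsigned int n = 0;

            while (n < cap && ring_pop(r, &batch[n]))
                    n++;
            if (n)
                    flush(batch, n);
            return n;
    }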
1686 1709
1687 1710
1688/* Called after netfront has transmitted */ 1711/* Called after netfront has transmitted */
1689int xenvif_tx_action(struct xenvif *vif, int budget) 1712int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1690{ 1713{
1691 unsigned nr_mops, nr_cops = 0; 1714 unsigned nr_mops, nr_cops = 0;
1692 int work_done, ret; 1715 int work_done, ret;
1693 1716
1694 if (unlikely(!tx_work_todo(vif))) 1717 if (unlikely(!tx_work_todo(queue)))
1695 return 0; 1718 return 0;
1696 1719
1697 xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops); 1720 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1698 1721
1699 if (nr_cops == 0) 1722 if (nr_cops == 0)
1700 return 0; 1723 return 0;
1701 1724
1702 gnttab_batch_copy(vif->tx_copy_ops, nr_cops); 1725 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1703 if (nr_mops != 0) { 1726 if (nr_mops != 0) {
1704 ret = gnttab_map_refs(vif->tx_map_ops, 1727 ret = gnttab_map_refs(queue->tx_map_ops,
1705 NULL, 1728 NULL,
1706 vif->pages_to_map, 1729 queue->pages_to_map,
1707 nr_mops); 1730 nr_mops);
1708 BUG_ON(ret); 1731 BUG_ON(ret);
1709 } 1732 }
1710 1733
1711 work_done = xenvif_tx_submit(vif); 1734 work_done = xenvif_tx_submit(queue);
1712 1735
1713 return work_done; 1736 return work_done;
1714} 1737}
1715 1738
1716static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, 1739static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1717 u8 status) 1740 u8 status)
1718{ 1741{
1719 struct pending_tx_info *pending_tx_info; 1742 struct pending_tx_info *pending_tx_info;
1720 pending_ring_idx_t index; 1743 pending_ring_idx_t index;
1721 unsigned long flags; 1744 unsigned long flags;
1722 1745
1723 pending_tx_info = &vif->pending_tx_info[pending_idx]; 1746 pending_tx_info = &queue->pending_tx_info[pending_idx];
1724 spin_lock_irqsave(&vif->response_lock, flags); 1747 spin_lock_irqsave(&queue->response_lock, flags);
1725 make_tx_response(vif, &pending_tx_info->req, status); 1748 make_tx_response(queue, &pending_tx_info->req, status);
1726 index = pending_index(vif->pending_prod); 1749 index = pending_index(queue->pending_prod);
1727 vif->pending_ring[index] = pending_idx; 1750 queue->pending_ring[index] = pending_idx;
1728 /* TX shouldn't use the index before we give it back here */ 1751 /* TX shouldn't use the index before we give it back here */
1729 mb(); 1752 mb();
1730 vif->pending_prod++; 1753 queue->pending_prod++;
1731 spin_unlock_irqrestore(&vif->response_lock, flags); 1754 spin_unlock_irqrestore(&queue->response_lock, flags);
1732} 1755}
1733 1756
1734 1757
1735static void make_tx_response(struct xenvif *vif, 1758static void make_tx_response(struct xenvif_queue *queue,
1736 struct xen_netif_tx_request *txp, 1759 struct xen_netif_tx_request *txp,
1737 s8 st) 1760 s8 st)
1738{ 1761{
1739 RING_IDX i = vif->tx.rsp_prod_pvt; 1762 RING_IDX i = queue->tx.rsp_prod_pvt;
1740 struct xen_netif_tx_response *resp; 1763 struct xen_netif_tx_response *resp;
1741 int notify; 1764 int notify;
1742 1765
1743 resp = RING_GET_RESPONSE(&vif->tx, i); 1766 resp = RING_GET_RESPONSE(&queue->tx, i);
1744 resp->id = txp->id; 1767 resp->id = txp->id;
1745 resp->status = st; 1768 resp->status = st;
1746 1769
1747 if (txp->flags & XEN_NETTXF_extra_info) 1770 if (txp->flags & XEN_NETTXF_extra_info)
1748 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; 1771 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1749 1772
1750 vif->tx.rsp_prod_pvt = ++i; 1773 queue->tx.rsp_prod_pvt = ++i;
1751 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); 1774 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1752 if (notify) 1775 if (notify)
1753 notify_remote_via_irq(vif->tx_irq); 1776 notify_remote_via_irq(queue->tx_irq);
1754} 1777}
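The notify decision buried in RING_PUSH_RESPONSES_AND_CHECK_NOTIFY is an event-suppression scheme: the peer advertises in rsp_event the index at which it wants to be woken, and an interrupt is raised only when that index falls inside the range of newly pushed responses. The test in isolation, wrap-safe through unsigned subtraction (standalone sketch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Notify only if 'rsp_event' lies in (old_prod, new_prod], i.e.
     * the peer asked to be woken inside the range just published.
     * Unsigned subtraction keeps the test correct across index wrap. */
    bool push_needs_notify(uint32_t old_prod, uint32_t new_prod,
                           uint32_t rsp_event)
    {
            return (uint32_t)(new_prod - rsp_event) <
                   (uint32_t)(new_prod - old_prod);
    }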
1755 1778
1756static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 1779static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1757 u16 id, 1780 u16 id,
1758 s8 st, 1781 s8 st,
1759 u16 offset, 1782 u16 offset,
1760 u16 size, 1783 u16 size,
1761 u16 flags) 1784 u16 flags)
1762{ 1785{
1763 RING_IDX i = vif->rx.rsp_prod_pvt; 1786 RING_IDX i = queue->rx.rsp_prod_pvt;
1764 struct xen_netif_rx_response *resp; 1787 struct xen_netif_rx_response *resp;
1765 1788
1766 resp = RING_GET_RESPONSE(&vif->rx, i); 1789 resp = RING_GET_RESPONSE(&queue->rx, i);
1767 resp->offset = offset; 1790 resp->offset = offset;
1768 resp->flags = flags; 1791 resp->flags = flags;
1769 resp->id = id; 1792 resp->id = id;
@@ -1771,26 +1794,26 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1771 if (st < 0) 1794 if (st < 0)
1772 resp->status = (s16)st; 1795 resp->status = (s16)st;
1773 1796
1774 vif->rx.rsp_prod_pvt = ++i; 1797 queue->rx.rsp_prod_pvt = ++i;
1775 1798
1776 return resp; 1799 return resp;
1777} 1800}
1778 1801
1779void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx) 1802void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1780{ 1803{
1781 int ret; 1804 int ret;
1782 struct gnttab_unmap_grant_ref tx_unmap_op; 1805 struct gnttab_unmap_grant_ref tx_unmap_op;
1783 1806
1784 gnttab_set_unmap_op(&tx_unmap_op, 1807 gnttab_set_unmap_op(&tx_unmap_op,
1785 idx_to_kaddr(vif, pending_idx), 1808 idx_to_kaddr(queue, pending_idx),
1786 GNTMAP_host_map, 1809 GNTMAP_host_map,
1787 vif->grant_tx_handle[pending_idx]); 1810 queue->grant_tx_handle[pending_idx]);
1788 xenvif_grant_handle_reset(vif, pending_idx); 1811 xenvif_grant_handle_reset(queue, pending_idx);
1789 1812
1790 ret = gnttab_unmap_refs(&tx_unmap_op, NULL, 1813 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1791 &vif->mmap_pages[pending_idx], 1); 1814 &queue->mmap_pages[pending_idx], 1);
1792 if (ret) { 1815 if (ret) {
1793 netdev_err(vif->dev, 1816 netdev_err(queue->vif->dev,
1794 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n", 1817 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1795 ret, 1818 ret,
1796 pending_idx, 1819 pending_idx,
@@ -1800,41 +1823,40 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
1800 BUG(); 1823 BUG();
1801 } 1824 }
1802 1825
1803 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); 1826 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1804} 1827}
1805 1828
1806static inline int rx_work_todo(struct xenvif *vif) 1829static inline int rx_work_todo(struct xenvif_queue *queue)
1807{ 1830{
1808 return (!skb_queue_empty(&vif->rx_queue) && 1831 return (!skb_queue_empty(&queue->rx_queue) &&
1809 xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) || 1832 xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
1810 vif->rx_queue_purge; 1833 queue->rx_queue_purge;
1811} 1834}
1812 1835
1813static inline int tx_work_todo(struct xenvif *vif) 1836static inline int tx_work_todo(struct xenvif_queue *queue)
1814{ 1837{
1815 1838 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1816 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
1817 return 1; 1839 return 1;
1818 1840
1819 return 0; 1841 return 0;
1820} 1842}
1821 1843
1822static inline bool tx_dealloc_work_todo(struct xenvif *vif) 1844static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1823{ 1845{
1824 return vif->dealloc_cons != vif->dealloc_prod; 1846 return queue->dealloc_cons != queue->dealloc_prod;
1825} 1847}
1826 1848
1827void xenvif_unmap_frontend_rings(struct xenvif *vif) 1849void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1828{ 1850{
1829 if (vif->tx.sring) 1851 if (queue->tx.sring)
1830 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), 1852 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1831 vif->tx.sring); 1853 queue->tx.sring);
1832 if (vif->rx.sring) 1854 if (queue->rx.sring)
1833 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), 1855 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1834 vif->rx.sring); 1856 queue->rx.sring);
1835} 1857}
1836 1858
1837int xenvif_map_frontend_rings(struct xenvif *vif, 1859int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1838 grant_ref_t tx_ring_ref, 1860 grant_ref_t tx_ring_ref,
1839 grant_ref_t rx_ring_ref) 1861 grant_ref_t rx_ring_ref)
1840{ 1862{
@@ -1844,85 +1866,78 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
1844 1866
1845 int err = -ENOMEM; 1867 int err = -ENOMEM;
1846 1868
1847 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), 1869 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1848 tx_ring_ref, &addr); 1870 tx_ring_ref, &addr);
1849 if (err) 1871 if (err)
1850 goto err; 1872 goto err;
1851 1873
1852 txs = (struct xen_netif_tx_sring *)addr; 1874 txs = (struct xen_netif_tx_sring *)addr;
1853 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); 1875 BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1854 1876
1855 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), 1877 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1856 rx_ring_ref, &addr); 1878 rx_ring_ref, &addr);
1857 if (err) 1879 if (err)
1858 goto err; 1880 goto err;
1859 1881
1860 rxs = (struct xen_netif_rx_sring *)addr; 1882 rxs = (struct xen_netif_rx_sring *)addr;
1861 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); 1883 BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1862 1884
1863 return 0; 1885 return 0;
1864 1886
1865err: 1887err:
1866 xenvif_unmap_frontend_rings(vif); 1888 xenvif_unmap_frontend_rings(queue);
1867 return err; 1889 return err;
1868} 1890}
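Both shared rings are mapped with the same valloc helper, and every failure funnels through one unwind that tolerates a partially mapped state, which is why xenvif_unmap_frontend_rings() checks each sring pointer before freeing it. The unwind shape in miniature (resource_map/resource_unmap are hypothetical stand-ins for the xenbus mapping helpers):

    #include <stddef.h>

    struct rings { void *tx; void *rx; };

    /* Hypothetical stand-ins for xenbus_map_ring_valloc()/_vfree(). */
    extern void *resource_map(int ref);
    extern void resource_unmap(void *addr);

    /* Safe on a partially mapped state: only frees what was mapped. */
    static void rings_unmap(struct rings *r)
    {
            if (r->tx) { resource_unmap(r->tx); r->tx = NULL; }
            if (r->rx) { resource_unmap(r->rx); r->rx = NULL; }
    }

    int rings_map(struct rings *r, int tx_ref, int rx_ref)
    {
            r->tx = resource_map(tx_ref);
            if (!r->tx)
                    goto err;
            r->rx = resource_map(rx_ref);
            if (!r->rx)
                    goto err;
            return 0;
    err:
            rings_unmap(r);
            return -1;
    }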
1869 1891
1870void xenvif_stop_queue(struct xenvif *vif) 1892static void xenvif_start_queue(struct xenvif_queue *queue)
1871{ 1893{
1872 if (!vif->can_queue) 1894 if (xenvif_schedulable(queue->vif))
1873 return; 1895 xenvif_wake_queue(queue);
1874
1875 netif_stop_queue(vif->dev);
1876}
1877
1878static void xenvif_start_queue(struct xenvif *vif)
1879{
1880 if (xenvif_schedulable(vif))
1881 netif_wake_queue(vif->dev);
1882} 1896}
1883 1897
1884int xenvif_kthread_guest_rx(void *data) 1898int xenvif_kthread_guest_rx(void *data)
1885{ 1899{
1886 struct xenvif *vif = data; 1900 struct xenvif_queue *queue = data;
1887 struct sk_buff *skb; 1901 struct sk_buff *skb;
1888 1902
1889 while (!kthread_should_stop()) { 1903 while (!kthread_should_stop()) {
1890 wait_event_interruptible(vif->wq, 1904 wait_event_interruptible(queue->wq,
1891 rx_work_todo(vif) || 1905 rx_work_todo(queue) ||
1892 vif->disabled || 1906 queue->vif->disabled ||
1893 kthread_should_stop()); 1907 kthread_should_stop());
1894 1908
1895 /* This frontend is found to be rogue, disable it in 1909 /* This frontend is found to be rogue, disable it in
1896 * kthread context. Currently this is only set when 1910 * kthread context. Currently this is only set when
1897 * netback finds out frontend sends malformed packet, 1911 * netback finds out frontend sends malformed packet,
1898 * but we cannot disable the interface in softirq 1912 * but we cannot disable the interface in softirq
1899 * context so we defer it here. 1913 * context so we defer it here, if this thread is
1914 * associated with queue 0.
1900 */ 1915 */
1901 if (unlikely(vif->disabled && netif_carrier_ok(vif->dev))) 1916 if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
1902 xenvif_carrier_off(vif); 1917 xenvif_carrier_off(queue->vif);
1903 1918
1904 if (kthread_should_stop()) 1919 if (kthread_should_stop())
1905 break; 1920 break;
1906 1921
1907 if (vif->rx_queue_purge) { 1922 if (queue->rx_queue_purge) {
1908 skb_queue_purge(&vif->rx_queue); 1923 skb_queue_purge(&queue->rx_queue);
1909 vif->rx_queue_purge = false; 1924 queue->rx_queue_purge = false;
1910 } 1925 }
1911 1926
1912 if (!skb_queue_empty(&vif->rx_queue)) 1927 if (!skb_queue_empty(&queue->rx_queue))
1913 xenvif_rx_action(vif); 1928 xenvif_rx_action(queue);
1914 1929
1915 if (skb_queue_empty(&vif->rx_queue) && 1930 if (skb_queue_empty(&queue->rx_queue) &&
1916 netif_queue_stopped(vif->dev)) { 1931 xenvif_queue_stopped(queue)) {
1917 del_timer_sync(&vif->wake_queue); 1932 del_timer_sync(&queue->wake_queue);
1918 xenvif_start_queue(vif); 1933 xenvif_start_queue(queue);
1919 } 1934 }
1920 1935
1921 cond_resched(); 1936 cond_resched();
1922 } 1937 }
1923 1938
1924 /* Bin any remaining skbs */ 1939 /* Bin any remaining skbs */
1925 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) 1940 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
1926 dev_kfree_skb(skb); 1941 dev_kfree_skb(skb);
1927 1942
1928 return 0; 1943 return 0;
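The RX thread follows the canonical worker shape: sleep until there is work, a disable request, or a stop request; re-check the stop flag after waking; do one round of work; yield; and free whatever is left on exit. The same skeleton in portable form, with a pthread condition variable standing in for the kernel waitqueue (an analogue, not the kthread API):

    #include <pthread.h>
    #include <stdbool.h>

    struct worker {
            pthread_mutex_t lock;
            pthread_cond_t wake;  /* plays the role of queue->wq */
            bool stop;            /* plays kthread_should_stop() */
            bool work;            /* plays rx_work_todo() */
    };

    void *worker_thread(void *arg)
    {
            struct worker *w = arg;

            pthread_mutex_lock(&w->lock);
            for (;;) {
                    /* Sleep until woken with work or a stop request. */
                    while (!w->work && !w->stop)
                            pthread_cond_wait(&w->wake, &w->lock);
                    if (w->stop)
                            break;          /* re-check after waking */
                    w->work = false;
                    pthread_mutex_unlock(&w->lock);
                    /* ... process the RX queue here ... */
                    pthread_mutex_lock(&w->lock);
            }
            pthread_mutex_unlock(&w->lock);
            /* ... bin any remaining work here, as the kthread does ... */
            return NULL;
    }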
@@ -1930,22 +1945,22 @@ int xenvif_kthread_guest_rx(void *data)
1930 1945
1931int xenvif_dealloc_kthread(void *data) 1946int xenvif_dealloc_kthread(void *data)
1932{ 1947{
1933 struct xenvif *vif = data; 1948 struct xenvif_queue *queue = data;
1934 1949
1935 while (!kthread_should_stop()) { 1950 while (!kthread_should_stop()) {
1936 wait_event_interruptible(vif->dealloc_wq, 1951 wait_event_interruptible(queue->dealloc_wq,
1937 tx_dealloc_work_todo(vif) || 1952 tx_dealloc_work_todo(queue) ||
1938 kthread_should_stop()); 1953 kthread_should_stop());
1939 if (kthread_should_stop()) 1954 if (kthread_should_stop())
1940 break; 1955 break;
1941 1956
1942 xenvif_tx_dealloc_action(vif); 1957 xenvif_tx_dealloc_action(queue);
1943 cond_resched(); 1958 cond_resched();
1944 } 1959 }
1945 1960
1946 /* Unmap anything remaining*/ 1961 /* Unmap anything remaining*/
1947 if (tx_dealloc_work_todo(vif)) 1962 if (tx_dealloc_work_todo(queue))
1948 xenvif_tx_dealloc_action(vif); 1963 xenvif_tx_dealloc_action(queue);
1949 1964
1950 return 0; 1965 return 0;
1951} 1966}
@@ -1957,6 +1972,9 @@ static int __init netback_init(void)
1957 if (!xen_domain()) 1972 if (!xen_domain())
1958 return -ENODEV; 1973 return -ENODEV;
1959 1974
1975 /* Allow as many queues as there are CPUs, by default */
1976 xenvif_max_queues = num_online_cpus();
1977
1960 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { 1978 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1961 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", 1979 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1962 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); 1980 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 7a206cffb062..96c63dc2509e 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -19,6 +19,8 @@
19*/ 19*/
20 20
21#include "common.h" 21#include "common.h"
22#include <linux/vmalloc.h>
23#include <linux/rtnetlink.h>
22 24
23struct backend_info { 25struct backend_info {
24 struct xenbus_device *dev; 26 struct xenbus_device *dev;
@@ -34,8 +36,9 @@ struct backend_info {
34 u8 have_hotplug_status_watch:1; 36 u8 have_hotplug_status_watch:1;
35}; 37};
36 38
37static int connect_rings(struct backend_info *); 39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
38static void connect(struct backend_info *); 40static void connect(struct backend_info *be);
41static int read_xenbus_vif_flags(struct backend_info *be);
39static void backend_create_xenvif(struct backend_info *be); 42static void backend_create_xenvif(struct backend_info *be);
40static void unregister_hotplug_status_watch(struct backend_info *be); 43static void unregister_hotplug_status_watch(struct backend_info *be);
41static void set_backend_state(struct backend_info *be, 44static void set_backend_state(struct backend_info *be,
@@ -157,6 +160,12 @@ static int netback_probe(struct xenbus_device *dev,
157 if (err) 160 if (err)
158 pr_debug("Error writing feature-split-event-channels\n"); 161 pr_debug("Error writing feature-split-event-channels\n");
159 162
163 /* Multi-queue support: This is an optional feature. */
164 err = xenbus_printf(XBT_NIL, dev->nodename,
165 "multi-queue-max-queues", "%u", xenvif_max_queues);
166 if (err)
167 pr_debug("Error writing multi-queue-max-queues\n");
168
160 err = xenbus_switch_state(dev, XenbusStateInitWait); 169 err = xenbus_switch_state(dev, XenbusStateInitWait);
161 if (err) 170 if (err)
162 goto fail; 171 goto fail;
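Writing multi-queue-max-queues during probe advertises the backend's limit before negotiation starts; the frontend is expected to read it, pick a count no larger than the limit, and write multi-queue-num-queues back, which connect() below then validates. A sketch of the frontend half of that handshake (example_negotiate_queues() is hypothetical):

#include <xen/xenbus.h>

static unsigned int example_negotiate_queues(struct xenbus_device *dev,
					     unsigned int wanted)
{
	unsigned int max = 1;

	/* An old backend without the key simply gets one queue. */
	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "multi-queue-max-queues", "%u", &max) < 0)
		max = 1;
	if (wanted > max)
		wanted = max;
	/* Tell the backend how many queues we will actually use. */
	if (xenbus_printf(XBT_NIL, dev->nodename,
			  "multi-queue-num-queues", "%u", wanted))
		return 1;	/* fall back to the legacy single queue */
	return wanted;
}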
@@ -485,10 +494,26 @@ static void connect(struct backend_info *be)
485{ 494{
486 int err; 495 int err;
487 struct xenbus_device *dev = be->dev; 496 struct xenbus_device *dev = be->dev;
497 unsigned long credit_bytes, credit_usec;
498 unsigned int queue_index;
499 unsigned int requested_num_queues;
500 struct xenvif_queue *queue;
488 501
489 err = connect_rings(be); 502 /* Check whether the frontend requested multiple queues
490 if (err) 503 * and read the number requested.
504 */
505 err = xenbus_scanf(XBT_NIL, dev->otherend,
506 "multi-queue-num-queues",
507 "%u", &requested_num_queues);
508 if (err < 0) {
509 requested_num_queues = 1; /* Fall back to single queue */
510 } else if (requested_num_queues > xenvif_max_queues) {
511 /* buggy or malicious guest */
512 xenbus_dev_fatal(dev, err,
513 "guest requested %u queues, exceeding the maximum of %u.",
514 requested_num_queues, xenvif_max_queues);
491 return; 515 return;
516 }
492 517
493 err = xen_net_read_mac(dev, be->vif->fe_dev_addr); 518 err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
494 if (err) { 519 if (err) {
@@ -496,9 +521,54 @@ static void connect(struct backend_info *be)
496 return; 521 return;
497 } 522 }
498 523
499 xen_net_read_rate(dev, &be->vif->credit_bytes, 524 xen_net_read_rate(dev, &credit_bytes, &credit_usec);
500 &be->vif->credit_usec); 525 read_xenbus_vif_flags(be);
501 be->vif->remaining_credit = be->vif->credit_bytes; 526
527 /* Use the number of queues requested by the frontend */
528 be->vif->queues = vzalloc(requested_num_queues *
529 sizeof(struct xenvif_queue));
530 rtnl_lock();
531 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
532 rtnl_unlock();
533
534 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
535 queue = &be->vif->queues[queue_index];
536 queue->vif = be->vif;
537 queue->id = queue_index;
538 snprintf(queue->name, sizeof(queue->name), "%s-q%u",
539 be->vif->dev->name, queue->id);
540
541 err = xenvif_init_queue(queue);
542 if (err) {
543 /* xenvif_init_queue() cleans up after itself on
544 * failure, but we need to clean up any previously
545 * initialised queues. Set num_queues to queue_index so that
546 * earlier queues can be destroyed using the regular
547 * disconnect logic.
548 */
549 rtnl_lock();
550 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
551 rtnl_unlock();
552 goto err;
553 }
554
555 queue->remaining_credit = credit_bytes;
556
557 err = connect_rings(be, queue);
558 if (err) {
559 /* connect_rings() cleans up after itself on failure,
560 * but we need to clean up after xenvif_init_queue() here,
561 * and also clean up any previously initialised queues.
562 */
563 xenvif_deinit_queue(queue);
564 rtnl_lock();
565 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
566 rtnl_unlock();
567 goto err;
568 }
569 }
570
571 xenvif_carrier_on(be->vif);
502 572
503 unregister_hotplug_status_watch(be); 573 unregister_hotplug_status_watch(be);
504 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 574 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
@@ -507,45 +577,109 @@ static void connect(struct backend_info *be)
507 if (!err) 577 if (!err)
508 be->have_hotplug_status_watch = 1; 578 be->have_hotplug_status_watch = 1;
509 579
510 netif_wake_queue(be->vif->dev); 580 netif_tx_wake_all_queues(be->vif->dev);
581
582 return;
583
584err:
585 if (be->vif->dev->real_num_tx_queues > 0)
586 xenvif_disconnect(be->vif); /* Clean up existing queues */
587 vfree(be->vif->queues);
588 be->vif->queues = NULL;
589 rtnl_lock();
590 netif_set_real_num_tx_queues(be->vif->dev, 0);
591 rtnl_unlock();
592 return;
511} 593}
512 594
513 595
514static int connect_rings(struct backend_info *be) 596static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
515{ 597{
516 struct xenvif *vif = be->vif;
517 struct xenbus_device *dev = be->dev; 598 struct xenbus_device *dev = be->dev;
599 unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
518 unsigned long tx_ring_ref, rx_ring_ref; 600 unsigned long tx_ring_ref, rx_ring_ref;
519 unsigned int tx_evtchn, rx_evtchn, rx_copy; 601 unsigned int tx_evtchn, rx_evtchn;
520 int err; 602 int err;
521 int val; 603 char *xspath;
604 size_t xspathsize;
605 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
606
607 /* If the frontend requested 1 queue, or we have fallen back
608 * to single queue due to lack of frontend support for multi-
609 * queue, expect the remaining XenStore keys in the toplevel
610 * directory. Otherwise, expect them in a subdirectory called
611 * queue-N.
612 */
613 if (num_queues == 1) {
614 xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
615 if (!xspath) {
616 xenbus_dev_fatal(dev, -ENOMEM,
617 "reading ring references");
618 return -ENOMEM;
619 }
620 strcpy(xspath, dev->otherend);
621 } else {
622 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
623 xspath = kzalloc(xspathsize, GFP_KERNEL);
624 if (!xspath) {
625 xenbus_dev_fatal(dev, -ENOMEM,
626 "reading ring references");
627 return -ENOMEM;
628 }
629 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
630 queue->id);
631 }
522 632
523 err = xenbus_gather(XBT_NIL, dev->otherend, 633 err = xenbus_gather(XBT_NIL, xspath,
524 "tx-ring-ref", "%lu", &tx_ring_ref, 634 "tx-ring-ref", "%lu", &tx_ring_ref,
525 "rx-ring-ref", "%lu", &rx_ring_ref, NULL); 635 "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
526 if (err) { 636 if (err) {
527 xenbus_dev_fatal(dev, err, 637 xenbus_dev_fatal(dev, err,
528 "reading %s/ring-ref", 638 "reading %s/ring-ref",
529 dev->otherend); 639 xspath);
530 return err; 640 goto err;
531 } 641 }
532 642
533 /* Try split event channels first, then single event channel. */ 643 /* Try split event channels first, then single event channel. */
534 err = xenbus_gather(XBT_NIL, dev->otherend, 644 err = xenbus_gather(XBT_NIL, xspath,
535 "event-channel-tx", "%u", &tx_evtchn, 645 "event-channel-tx", "%u", &tx_evtchn,
536 "event-channel-rx", "%u", &rx_evtchn, NULL); 646 "event-channel-rx", "%u", &rx_evtchn, NULL);
537 if (err < 0) { 647 if (err < 0) {
538 err = xenbus_scanf(XBT_NIL, dev->otherend, 648 err = xenbus_scanf(XBT_NIL, xspath,
539 "event-channel", "%u", &tx_evtchn); 649 "event-channel", "%u", &tx_evtchn);
540 if (err < 0) { 650 if (err < 0) {
541 xenbus_dev_fatal(dev, err, 651 xenbus_dev_fatal(dev, err,
542 "reading %s/event-channel(-tx/rx)", 652 "reading %s/event-channel(-tx/rx)",
543 dev->otherend); 653 xspath);
544 return err; 654 goto err;
545 } 655 }
546 rx_evtchn = tx_evtchn; 656 rx_evtchn = tx_evtchn;
547 } 657 }
548 658
659 /* Map the shared frame, irq etc. */
660 err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
661 tx_evtchn, rx_evtchn);
662 if (err) {
663 xenbus_dev_fatal(dev, err,
664 "mapping shared-frames %lu/%lu port tx %u rx %u",
665 tx_ring_ref, rx_ring_ref,
666 tx_evtchn, rx_evtchn);
667 goto err;
668 }
669
670 err = 0;
671err: /* Regular return falls through with err == 0 */
672 kfree(xspath);
673 return err;
674}
675
676static int read_xenbus_vif_flags(struct backend_info *be)
677{
678 struct xenvif *vif = be->vif;
679 struct xenbus_device *dev = be->dev;
680 unsigned int rx_copy;
681 int err, val;
682
549 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 683 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
550 &rx_copy); 684 &rx_copy);
551 if (err == -ENOENT) { 685 if (err == -ENOENT) {
@@ -621,16 +755,6 @@ static int connect_rings(struct backend_info *be)
621 val = 0; 755 val = 0;
622 vif->ipv6_csum = !!val; 756 vif->ipv6_csum = !!val;
623 757
624 /* Map the shared frame, irq etc. */
625 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
626 tx_evtchn, rx_evtchn);
627 if (err) {
628 xenbus_dev_fatal(dev, err,
629 "mapping shared-frames %lu/%lu port tx %u rx %u",
630 tx_ring_ref, rx_ring_ref,
631 tx_evtchn, rx_evtchn);
632 return err;
633 }
634 return 0; 758 return 0;
635} 759}
636 760
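The xenbus changes above settle the on-store layout: with a single queue the ring references and event channels stay at their legacy flat paths, while multiple queues move each set under <frontend>/queue-N/, keyed by queue->id. The path construction in connect_rings() boils down to the following condensed sketch (example_queue_path() is hypothetical; the driver open-codes it with kzalloc and snprintf):

#include <linux/slab.h>
#include <linux/string.h>

static char *example_queue_path(const char *otherend, unsigned int id,
				unsigned int num_queues)
{
	/* Legacy layout: keys live directly under the frontend dir. */
	if (num_queues == 1)
		return kstrdup(otherend, GFP_KERNEL);
	/* Multi-queue layout: one queue-N subdirectory per queue. */
	return kasprintf(GFP_KERNEL, "%s/queue-%u", otherend, id);
}

Either way the caller reads tx-ring-ref, rx-ring-ref and the event-channel keys relative to the returned path, and kfree()s the path when done.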
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 158b5e639fc7..5a7872ac3566 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,6 +57,12 @@
57#include <xen/interface/memory.h> 57#include <xen/interface/memory.h>
58#include <xen/interface/grant_table.h> 58#include <xen/interface/grant_table.h>
59 59
60/* Module parameters */
61static unsigned int xennet_max_queues;
62module_param_named(max_queues, xennet_max_queues, uint, 0644);
63MODULE_PARM_DESC(max_queues,
64 "Maximum number of queues per virtual interface");
65
60static const struct ethtool_ops xennet_ethtool_ops; 66static const struct ethtool_ops xennet_ethtool_ops;
61 67
62struct netfront_cb { 68struct netfront_cb {
@@ -73,6 +79,12 @@ struct netfront_cb {
73#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) 79#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
74#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) 80#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
75 81
82/* Queue name is interface name with "-qNNN" appended */
83#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
84
85/* IRQ name is queue name with "-tx" or "-rx" appended */
86#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
87
76struct netfront_stats { 88struct netfront_stats {
77 u64 rx_packets; 89 u64 rx_packets;
78 u64 tx_packets; 90 u64 tx_packets;
@@ -81,9 +93,12 @@ struct netfront_stats {
81 struct u64_stats_sync syncp; 93 struct u64_stats_sync syncp;
82}; 94};
83 95
84struct netfront_info { 96struct netfront_info;
85 struct list_head list; 97
86 struct net_device *netdev; 98struct netfront_queue {
99 unsigned int id; /* Queue ID, 0-based */
100 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
101 struct netfront_info *info;
87 102
88 struct napi_struct napi; 103 struct napi_struct napi;
89 104
@@ -93,10 +108,8 @@ struct netfront_info {
93 unsigned int tx_evtchn, rx_evtchn; 108 unsigned int tx_evtchn, rx_evtchn;
94 unsigned int tx_irq, rx_irq; 109 unsigned int tx_irq, rx_irq;
95 /* Only used when split event channels support is enabled */ 110 /* Only used when split event channels support is enabled */
96 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ 111 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
97 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ 112 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
98
99 struct xenbus_device *xbdev;
100 113
101 spinlock_t tx_lock; 114 spinlock_t tx_lock;
102 struct xen_netif_tx_front_ring tx; 115 struct xen_netif_tx_front_ring tx;
@@ -140,11 +153,21 @@ struct netfront_info {
140 unsigned long rx_pfn_array[NET_RX_RING_SIZE]; 153 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
141 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; 154 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
142 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 155 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
156};
157
158struct netfront_info {
159 struct list_head list;
160 struct net_device *netdev;
161
162 struct xenbus_device *xbdev;
163
164 /* Multi-queue support */
165 struct netfront_queue *queues;
143 166
144 /* Statistics */ 167 /* Statistics */
145 struct netfront_stats __percpu *stats; 168 struct netfront_stats __percpu *stats;
146 169
147 unsigned long rx_gso_checksum_fixup; 170 atomic_t rx_gso_checksum_fixup;
148}; 171};
149 172
150struct netfront_rx_info { 173struct netfront_rx_info {
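This reshuffle is the heart of the frontend patch: everything that exists once per ring pair (NAPI context, locks, rings, grant bookkeeping, refill timer) moves from netfront_info into the new netfront_queue, and netfront_info shrinks to device-wide state plus the queues array. A trimmed sketch of the resulting relationship, with representative fields only:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Per-queue state: one instance per TX/RX ring pair. */
struct example_queue {
	unsigned int id;		/* 0-based queue index */
	struct example_info *info;	/* back-pointer to the device */
	struct napi_struct napi;	/* per-queue polling context */
	spinlock_t tx_lock;		/* guards this queue's TX ring */
	/* rings, grant refs, rx_refill_timer, ... */
};

/* Device-wide state: one instance per netdev. */
struct example_info {
	struct net_device *netdev;
	struct xenbus_device *xbdev;
	struct example_queue *queues;	/* one entry per TX queue */
	struct netfront_stats __percpu *stats;
};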
@@ -187,21 +210,21 @@ static int xennet_rxidx(RING_IDX idx)
187 return idx & (NET_RX_RING_SIZE - 1); 210 return idx & (NET_RX_RING_SIZE - 1);
188} 211}
189 212
190static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, 213static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
191 RING_IDX ri) 214 RING_IDX ri)
192{ 215{
193 int i = xennet_rxidx(ri); 216 int i = xennet_rxidx(ri);
194 struct sk_buff *skb = np->rx_skbs[i]; 217 struct sk_buff *skb = queue->rx_skbs[i];
195 np->rx_skbs[i] = NULL; 218 queue->rx_skbs[i] = NULL;
196 return skb; 219 return skb;
197} 220}
198 221
199static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, 222static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
200 RING_IDX ri) 223 RING_IDX ri)
201{ 224{
202 int i = xennet_rxidx(ri); 225 int i = xennet_rxidx(ri);
203 grant_ref_t ref = np->grant_rx_ref[i]; 226 grant_ref_t ref = queue->grant_rx_ref[i];
204 np->grant_rx_ref[i] = GRANT_INVALID_REF; 227 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
205 return ref; 228 return ref;
206} 229}
207 230
@@ -221,41 +244,40 @@ static bool xennet_can_sg(struct net_device *dev)
221 244
222static void rx_refill_timeout(unsigned long data) 245static void rx_refill_timeout(unsigned long data)
223{ 246{
224 struct net_device *dev = (struct net_device *)data; 247 struct netfront_queue *queue = (struct netfront_queue *)data;
225 struct netfront_info *np = netdev_priv(dev); 248 napi_schedule(&queue->napi);
226 napi_schedule(&np->napi);
227} 249}
228 250
229static int netfront_tx_slot_available(struct netfront_info *np) 251static int netfront_tx_slot_available(struct netfront_queue *queue)
230{ 252{
231 return (np->tx.req_prod_pvt - np->tx.rsp_cons) < 253 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
232 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); 254 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
233} 255}
234 256
235static void xennet_maybe_wake_tx(struct net_device *dev) 257static void xennet_maybe_wake_tx(struct netfront_queue *queue)
236{ 258{
237 struct netfront_info *np = netdev_priv(dev); 259 struct net_device *dev = queue->info->netdev;
260 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
238 261
239 if (unlikely(netif_queue_stopped(dev)) && 262 if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
240 netfront_tx_slot_available(np) && 263 netfront_tx_slot_available(queue) &&
241 likely(netif_running(dev))) 264 likely(netif_running(dev)))
242 netif_wake_queue(dev); 265 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
243} 266}
244 267
245static void xennet_alloc_rx_buffers(struct net_device *dev) 268static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
246{ 269{
247 unsigned short id; 270 unsigned short id;
248 struct netfront_info *np = netdev_priv(dev);
249 struct sk_buff *skb; 271 struct sk_buff *skb;
250 struct page *page; 272 struct page *page;
251 int i, batch_target, notify; 273 int i, batch_target, notify;
252 RING_IDX req_prod = np->rx.req_prod_pvt; 274 RING_IDX req_prod = queue->rx.req_prod_pvt;
253 grant_ref_t ref; 275 grant_ref_t ref;
254 unsigned long pfn; 276 unsigned long pfn;
255 void *vaddr; 277 void *vaddr;
256 struct xen_netif_rx_request *req; 278 struct xen_netif_rx_request *req;
257 279
258 if (unlikely(!netif_carrier_ok(dev))) 280 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
259 return; 281 return;
260 282
261 /* 283 /*
@@ -264,9 +286,10 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
264 * allocator, so should reduce the chance of failed allocation requests 286 * allocator, so should reduce the chance of failed allocation requests
265 * both for ourself and for other kernel subsystems. 287 * both for ourself and for other kernel subsystems.
266 */ 288 */
267 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); 289 batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
268 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { 290 for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
269 skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, 291 skb = __netdev_alloc_skb(queue->info->netdev,
292 RX_COPY_THRESHOLD + NET_IP_ALIGN,
270 GFP_ATOMIC | __GFP_NOWARN); 293 GFP_ATOMIC | __GFP_NOWARN);
271 if (unlikely(!skb)) 294 if (unlikely(!skb))
272 goto no_skb; 295 goto no_skb;
@@ -279,7 +302,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
279 kfree_skb(skb); 302 kfree_skb(skb);
280no_skb: 303no_skb:
281 /* Could not allocate any skbuffs. Try again later. */ 304 /* Could not allocate any skbuffs. Try again later. */
282 mod_timer(&np->rx_refill_timer, 305 mod_timer(&queue->rx_refill_timer,
283 jiffies + (HZ/10)); 306 jiffies + (HZ/10));
284 307
285 /* Any skbuffs queued for refill? Force them out. */ 308 /* Any skbuffs queued for refill? Force them out. */
@@ -289,44 +312,44 @@ no_skb:
289 } 312 }
290 313
291 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); 314 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
292 __skb_queue_tail(&np->rx_batch, skb); 315 __skb_queue_tail(&queue->rx_batch, skb);
293 } 316 }
294 317
295 /* Is the batch large enough to be worthwhile? */ 318 /* Is the batch large enough to be worthwhile? */
296 if (i < (np->rx_target/2)) { 319 if (i < (queue->rx_target/2)) {
297 if (req_prod > np->rx.sring->req_prod) 320 if (req_prod > queue->rx.sring->req_prod)
298 goto push; 321 goto push;
299 return; 322 return;
300 } 323 }
301 324
302 /* Adjust our fill target if we risked running out of buffers. */ 325 /* Adjust our fill target if we risked running out of buffers. */
303 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && 326 if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
304 ((np->rx_target *= 2) > np->rx_max_target)) 327 ((queue->rx_target *= 2) > queue->rx_max_target))
305 np->rx_target = np->rx_max_target; 328 queue->rx_target = queue->rx_max_target;
306 329
307 refill: 330 refill:
308 for (i = 0; ; i++) { 331 for (i = 0; ; i++) {
309 skb = __skb_dequeue(&np->rx_batch); 332 skb = __skb_dequeue(&queue->rx_batch);
310 if (skb == NULL) 333 if (skb == NULL)
311 break; 334 break;
312 335
313 skb->dev = dev; 336 skb->dev = queue->info->netdev;
314 337
315 id = xennet_rxidx(req_prod + i); 338 id = xennet_rxidx(req_prod + i);
316 339
317 BUG_ON(np->rx_skbs[id]); 340 BUG_ON(queue->rx_skbs[id]);
318 np->rx_skbs[id] = skb; 341 queue->rx_skbs[id] = skb;
319 342
320 ref = gnttab_claim_grant_reference(&np->gref_rx_head); 343 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
321 BUG_ON((signed short)ref < 0); 344 BUG_ON((signed short)ref < 0);
322 np->grant_rx_ref[id] = ref; 345 queue->grant_rx_ref[id] = ref;
323 346
324 pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); 347 pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
325 vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); 348 vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
326 349
327 req = RING_GET_REQUEST(&np->rx, req_prod + i); 350 req = RING_GET_REQUEST(&queue->rx, req_prod + i);
328 gnttab_grant_foreign_access_ref(ref, 351 gnttab_grant_foreign_access_ref(ref,
329 np->xbdev->otherend_id, 352 queue->info->xbdev->otherend_id,
330 pfn_to_mfn(pfn), 353 pfn_to_mfn(pfn),
331 0); 354 0);
332 355
@@ -337,72 +360,77 @@ no_skb:
337 wmb(); /* barrier so backend sees requests */ 360
338 361
339 /* Above is a suitable barrier to ensure backend will see requests. */ 362 /* Above is a suitable barrier to ensure backend will see requests. */
340 np->rx.req_prod_pvt = req_prod + i; 363 queue->rx.req_prod_pvt = req_prod + i;
341 push: 364 push:
342 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); 365 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
343 if (notify) 366 if (notify)
344 notify_remote_via_irq(np->rx_irq); 367 notify_remote_via_irq(queue->rx_irq);
345} 368}
346 369
347static int xennet_open(struct net_device *dev) 370static int xennet_open(struct net_device *dev)
348{ 371{
349 struct netfront_info *np = netdev_priv(dev); 372 struct netfront_info *np = netdev_priv(dev);
350 373 unsigned int num_queues = dev->real_num_tx_queues;
351 napi_enable(&np->napi); 374 unsigned int i = 0;
352 375 struct netfront_queue *queue = NULL;
353 spin_lock_bh(&np->rx_lock); 376
354 if (netif_carrier_ok(dev)) { 377 for (i = 0; i < num_queues; ++i) {
355 xennet_alloc_rx_buffers(dev); 378 queue = &np->queues[i];
356 np->rx.sring->rsp_event = np->rx.rsp_cons + 1; 379 napi_enable(&queue->napi);
357 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 380
358 napi_schedule(&np->napi); 381 spin_lock_bh(&queue->rx_lock);
382 if (netif_carrier_ok(dev)) {
383 xennet_alloc_rx_buffers(queue);
384 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
385 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
386 napi_schedule(&queue->napi);
387 }
388 spin_unlock_bh(&queue->rx_lock);
359 } 389 }
360 spin_unlock_bh(&np->rx_lock);
361 390
362 netif_start_queue(dev); 391 netif_tx_start_all_queues(dev);
363 392
364 return 0; 393 return 0;
365} 394}
366 395
367static void xennet_tx_buf_gc(struct net_device *dev) 396static void xennet_tx_buf_gc(struct netfront_queue *queue)
368{ 397{
369 RING_IDX cons, prod; 398 RING_IDX cons, prod;
370 unsigned short id; 399 unsigned short id;
371 struct netfront_info *np = netdev_priv(dev);
372 struct sk_buff *skb; 400 struct sk_buff *skb;
373 401
374 BUG_ON(!netif_carrier_ok(dev)); 402 BUG_ON(!netif_carrier_ok(queue->info->netdev));
375 403
376 do { 404 do {
377 prod = np->tx.sring->rsp_prod; 405 prod = queue->tx.sring->rsp_prod;
378 rmb(); /* Ensure we see responses up to 'rp'. */ 406 rmb(); /* Ensure we see responses up to 'rp'. */
379 407
380 for (cons = np->tx.rsp_cons; cons != prod; cons++) { 408 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
381 struct xen_netif_tx_response *txrsp; 409 struct xen_netif_tx_response *txrsp;
382 410
383 txrsp = RING_GET_RESPONSE(&np->tx, cons); 411 txrsp = RING_GET_RESPONSE(&queue->tx, cons);
384 if (txrsp->status == XEN_NETIF_RSP_NULL) 412 if (txrsp->status == XEN_NETIF_RSP_NULL)
385 continue; 413 continue;
386 414
387 id = txrsp->id; 415 id = txrsp->id;
388 skb = np->tx_skbs[id].skb; 416 skb = queue->tx_skbs[id].skb;
389 if (unlikely(gnttab_query_foreign_access( 417 if (unlikely(gnttab_query_foreign_access(
390 np->grant_tx_ref[id]) != 0)) { 418 queue->grant_tx_ref[id]) != 0)) {
391 pr_alert("%s: warning -- grant still in use by backend domain\n", 419 pr_alert("%s: warning -- grant still in use by backend domain\n",
392 __func__); 420 __func__);
393 BUG(); 421 BUG();
394 } 422 }
395 gnttab_end_foreign_access_ref( 423 gnttab_end_foreign_access_ref(
396 np->grant_tx_ref[id], GNTMAP_readonly); 424 queue->grant_tx_ref[id], GNTMAP_readonly);
397 gnttab_release_grant_reference( 425 gnttab_release_grant_reference(
398 &np->gref_tx_head, np->grant_tx_ref[id]); 426 &queue->gref_tx_head, queue->grant_tx_ref[id]);
399 np->grant_tx_ref[id] = GRANT_INVALID_REF; 427 queue->grant_tx_ref[id] = GRANT_INVALID_REF;
400 np->grant_tx_page[id] = NULL; 428 queue->grant_tx_page[id] = NULL;
401 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); 429 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
402 dev_kfree_skb_irq(skb); 430 dev_kfree_skb_irq(skb);
403 } 431 }
404 432
405 np->tx.rsp_cons = prod; 433 queue->tx.rsp_cons = prod;
406 434
407 /* 435 /*
408 * Set a new event, then check for race with update of tx_cons. 436 * Set a new event, then check for race with update of tx_cons.
@@ -412,21 +440,20 @@ static void xennet_tx_buf_gc(struct net_device *dev)
412 * data is outstanding: in such cases notification from Xen is 440 * data is outstanding: in such cases notification from Xen is
413 * likely to be the only kick that we'll get. 441 * likely to be the only kick that we'll get.
414 */ 442 */
415 np->tx.sring->rsp_event = 443 queue->tx.sring->rsp_event =
416 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; 444 prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
417 mb(); /* update shared area */ 445 mb(); /* update shared area */
418 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); 446 } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
419 447
420 xennet_maybe_wake_tx(dev); 448 xennet_maybe_wake_tx(queue);
421} 449}
422 450
423static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, 451static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
424 struct xen_netif_tx_request *tx) 452 struct xen_netif_tx_request *tx)
425{ 453{
426 struct netfront_info *np = netdev_priv(dev);
427 char *data = skb->data; 454 char *data = skb->data;
428 unsigned long mfn; 455 unsigned long mfn;
429 RING_IDX prod = np->tx.req_prod_pvt; 456 RING_IDX prod = queue->tx.req_prod_pvt;
430 int frags = skb_shinfo(skb)->nr_frags; 457 int frags = skb_shinfo(skb)->nr_frags;
431 unsigned int offset = offset_in_page(data); 458 unsigned int offset = offset_in_page(data);
432 unsigned int len = skb_headlen(skb); 459 unsigned int len = skb_headlen(skb);
@@ -443,19 +470,19 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
443 data += tx->size; 470 data += tx->size;
444 offset = 0; 471 offset = 0;
445 472
446 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); 473 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
447 np->tx_skbs[id].skb = skb_get(skb); 474 queue->tx_skbs[id].skb = skb_get(skb);
448 tx = RING_GET_REQUEST(&np->tx, prod++); 475 tx = RING_GET_REQUEST(&queue->tx, prod++);
449 tx->id = id; 476 tx->id = id;
450 ref = gnttab_claim_grant_reference(&np->gref_tx_head); 477 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
451 BUG_ON((signed short)ref < 0); 478 BUG_ON((signed short)ref < 0);
452 479
453 mfn = virt_to_mfn(data); 480 mfn = virt_to_mfn(data);
454 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, 481 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
455 mfn, GNTMAP_readonly); 482 mfn, GNTMAP_readonly);
456 483
457 np->grant_tx_page[id] = virt_to_page(data); 484 queue->grant_tx_page[id] = virt_to_page(data);
458 tx->gref = np->grant_tx_ref[id] = ref; 485 tx->gref = queue->grant_tx_ref[id] = ref;
459 tx->offset = offset; 486 tx->offset = offset;
460 tx->size = len; 487 tx->size = len;
461 tx->flags = 0; 488 tx->flags = 0;
@@ -487,21 +514,21 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
487 514
488 tx->flags |= XEN_NETTXF_more_data; 515 tx->flags |= XEN_NETTXF_more_data;
489 516
490 id = get_id_from_freelist(&np->tx_skb_freelist, 517 id = get_id_from_freelist(&queue->tx_skb_freelist,
491 np->tx_skbs); 518 queue->tx_skbs);
492 np->tx_skbs[id].skb = skb_get(skb); 519 queue->tx_skbs[id].skb = skb_get(skb);
493 tx = RING_GET_REQUEST(&np->tx, prod++); 520 tx = RING_GET_REQUEST(&queue->tx, prod++);
494 tx->id = id; 521 tx->id = id;
495 ref = gnttab_claim_grant_reference(&np->gref_tx_head); 522 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
496 BUG_ON((signed short)ref < 0); 523 BUG_ON((signed short)ref < 0);
497 524
498 mfn = pfn_to_mfn(page_to_pfn(page)); 525 mfn = pfn_to_mfn(page_to_pfn(page));
499 gnttab_grant_foreign_access_ref(ref, 526 gnttab_grant_foreign_access_ref(ref,
500 np->xbdev->otherend_id, 527 queue->info->xbdev->otherend_id,
501 mfn, GNTMAP_readonly); 528 mfn, GNTMAP_readonly);
502 529
503 np->grant_tx_page[id] = page; 530 queue->grant_tx_page[id] = page;
504 tx->gref = np->grant_tx_ref[id] = ref; 531 tx->gref = queue->grant_tx_ref[id] = ref;
505 tx->offset = offset; 532 tx->offset = offset;
506 tx->size = bytes; 533 tx->size = bytes;
507 tx->flags = 0; 534 tx->flags = 0;
@@ -518,7 +545,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
518 } 545 }
519 } 546 }
520 547
521 np->tx.req_prod_pvt = prod; 548 queue->tx.req_prod_pvt = prod;
522} 549}
523 550
524/* 551/*
@@ -544,6 +571,24 @@ static int xennet_count_skb_frag_slots(struct sk_buff *skb)
544 return pages; 571 return pages;
545} 572}
546 573
574static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
575 void *accel_priv, select_queue_fallback_t fallback)
576{
577 unsigned int num_queues = dev->real_num_tx_queues;
578 u32 hash;
579 u16 queue_idx;
580
581 /* First, check if there is only one queue */
582 if (num_queues == 1) {
583 queue_idx = 0;
584 } else {
585 hash = skb_get_hash(skb);
586 queue_idx = hash % num_queues;
587 }
588
589 return queue_idx;
590}
591
547static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) 592static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
548{ 593{
549 unsigned short id; 594 unsigned short id;
@@ -559,6 +604,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
559 unsigned int offset = offset_in_page(data); 604 unsigned int offset = offset_in_page(data);
560 unsigned int len = skb_headlen(skb); 605 unsigned int len = skb_headlen(skb);
561 unsigned long flags; 606 unsigned long flags;
607 struct netfront_queue *queue = NULL;
608 unsigned int num_queues = dev->real_num_tx_queues;
609 u16 queue_index;
610
611 /* Drop the packet if no queues are set up */
612 if (num_queues < 1)
613 goto drop;
614 /* Determine which queue to transmit this SKB on */
615 queue_index = skb_get_queue_mapping(skb);
616 queue = &np->queues[queue_index];
562 617
563 /* If skb->len is too big for wire format, drop skb and alert 618 /* If skb->len is too big for wire format, drop skb and alert
564 * user about misconfiguration. 619 * user about misconfiguration.
@@ -578,30 +633,30 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
578 goto drop; 633 goto drop;
579 } 634 }
580 635
581 spin_lock_irqsave(&np->tx_lock, flags); 636 spin_lock_irqsave(&queue->tx_lock, flags);
582 637
583 if (unlikely(!netif_carrier_ok(dev) || 638 if (unlikely(!netif_carrier_ok(dev) ||
584 (slots > 1 && !xennet_can_sg(dev)) || 639 (slots > 1 && !xennet_can_sg(dev)) ||
585 netif_needs_gso(skb, netif_skb_features(skb)))) { 640 netif_needs_gso(skb, netif_skb_features(skb)))) {
586 spin_unlock_irqrestore(&np->tx_lock, flags); 641 spin_unlock_irqrestore(&queue->tx_lock, flags);
587 goto drop; 642 goto drop;
588 } 643 }
589 644
590 i = np->tx.req_prod_pvt; 645 i = queue->tx.req_prod_pvt;
591 646
592 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); 647 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
593 np->tx_skbs[id].skb = skb; 648 queue->tx_skbs[id].skb = skb;
594 649
595 tx = RING_GET_REQUEST(&np->tx, i); 650 tx = RING_GET_REQUEST(&queue->tx, i);
596 651
597 tx->id = id; 652 tx->id = id;
598 ref = gnttab_claim_grant_reference(&np->gref_tx_head); 653 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
599 BUG_ON((signed short)ref < 0); 654 BUG_ON((signed short)ref < 0);
600 mfn = virt_to_mfn(data); 655 mfn = virt_to_mfn(data);
601 gnttab_grant_foreign_access_ref( 656 gnttab_grant_foreign_access_ref(
602 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); 657 ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
603 np->grant_tx_page[id] = virt_to_page(data); 658 queue->grant_tx_page[id] = virt_to_page(data);
604 tx->gref = np->grant_tx_ref[id] = ref; 659 tx->gref = queue->grant_tx_ref[id] = ref;
605 tx->offset = offset; 660 tx->offset = offset;
606 tx->size = len; 661 tx->size = len;
607 662
@@ -617,7 +672,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
617 struct xen_netif_extra_info *gso; 672 struct xen_netif_extra_info *gso;
618 673
619 gso = (struct xen_netif_extra_info *) 674 gso = (struct xen_netif_extra_info *)
620 RING_GET_REQUEST(&np->tx, ++i); 675 RING_GET_REQUEST(&queue->tx, ++i);
621 676
622 tx->flags |= XEN_NETTXF_extra_info; 677 tx->flags |= XEN_NETTXF_extra_info;
623 678
@@ -632,14 +687,14 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
632 gso->flags = 0; 687 gso->flags = 0;
633 } 688 }
634 689
635 np->tx.req_prod_pvt = i + 1; 690 queue->tx.req_prod_pvt = i + 1;
636 691
637 xennet_make_frags(skb, dev, tx); 692 xennet_make_frags(skb, queue, tx);
638 tx->size = skb->len; 693 tx->size = skb->len;
639 694
640 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); 695 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
641 if (notify) 696 if (notify)
642 notify_remote_via_irq(np->tx_irq); 697 notify_remote_via_irq(queue->tx_irq);
643 698
644 u64_stats_update_begin(&stats->syncp); 699 u64_stats_update_begin(&stats->syncp);
645 stats->tx_bytes += skb->len; 700 stats->tx_bytes += skb->len;
@@ -647,12 +702,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
647 u64_stats_update_end(&stats->syncp); 702 u64_stats_update_end(&stats->syncp);
648 703
649 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 704 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
650 xennet_tx_buf_gc(dev); 705 xennet_tx_buf_gc(queue);
651 706
652 if (!netfront_tx_slot_available(np)) 707 if (!netfront_tx_slot_available(queue))
653 netif_stop_queue(dev); 708 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
654 709
655 spin_unlock_irqrestore(&np->tx_lock, flags); 710 spin_unlock_irqrestore(&queue->tx_lock, flags);
656 711
657 return NETDEV_TX_OK; 712 return NETDEV_TX_OK;
658 713
@@ -665,32 +720,38 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
665static int xennet_close(struct net_device *dev) 720static int xennet_close(struct net_device *dev)
666{ 721{
667 struct netfront_info *np = netdev_priv(dev); 722 struct netfront_info *np = netdev_priv(dev);
668 netif_stop_queue(np->netdev); 723 unsigned int num_queues = dev->real_num_tx_queues;
669 napi_disable(&np->napi); 724 unsigned int i;
725 struct netfront_queue *queue;
726 netif_tx_stop_all_queues(np->netdev);
727 for (i = 0; i < num_queues; ++i) {
728 queue = &np->queues[i];
729 napi_disable(&queue->napi);
730 }
670 return 0; 731 return 0;
671} 732}
672 733
673static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, 734static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
674 grant_ref_t ref) 735 grant_ref_t ref)
675{ 736{
676 int new = xennet_rxidx(np->rx.req_prod_pvt); 737 int new = xennet_rxidx(queue->rx.req_prod_pvt);
677 738
678 BUG_ON(np->rx_skbs[new]); 739 BUG_ON(queue->rx_skbs[new]);
679 np->rx_skbs[new] = skb; 740 queue->rx_skbs[new] = skb;
680 np->grant_rx_ref[new] = ref; 741 queue->grant_rx_ref[new] = ref;
681 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; 742 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
682 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; 743 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
683 np->rx.req_prod_pvt++; 744 queue->rx.req_prod_pvt++;
684} 745}
685 746
686static int xennet_get_extras(struct netfront_info *np, 747static int xennet_get_extras(struct netfront_queue *queue,
687 struct xen_netif_extra_info *extras, 748 struct xen_netif_extra_info *extras,
688 RING_IDX rp) 749 RING_IDX rp)
689 750
690{ 751{
691 struct xen_netif_extra_info *extra; 752 struct xen_netif_extra_info *extra;
692 struct device *dev = &np->netdev->dev; 753 struct device *dev = &queue->info->netdev->dev;
693 RING_IDX cons = np->rx.rsp_cons; 754 RING_IDX cons = queue->rx.rsp_cons;
694 int err = 0; 755 int err = 0;
695 756
696 do { 757 do {
@@ -705,7 +766,7 @@ static int xennet_get_extras(struct netfront_info *np,
705 } 766 }
706 767
707 extra = (struct xen_netif_extra_info *) 768 extra = (struct xen_netif_extra_info *)
708 RING_GET_RESPONSE(&np->rx, ++cons); 769 RING_GET_RESPONSE(&queue->rx, ++cons);
709 770
710 if (unlikely(!extra->type || 771 if (unlikely(!extra->type ||
711 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 772 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
@@ -718,33 +779,33 @@ static int xennet_get_extras(struct netfront_info *np,
718 sizeof(*extra)); 779 sizeof(*extra));
719 } 780 }
720 781
721 skb = xennet_get_rx_skb(np, cons); 782 skb = xennet_get_rx_skb(queue, cons);
722 ref = xennet_get_rx_ref(np, cons); 783 ref = xennet_get_rx_ref(queue, cons);
723 xennet_move_rx_slot(np, skb, ref); 784 xennet_move_rx_slot(queue, skb, ref);
724 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); 785 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
725 786
726 np->rx.rsp_cons = cons; 787 queue->rx.rsp_cons = cons;
727 return err; 788 return err;
728} 789}
729 790
730static int xennet_get_responses(struct netfront_info *np, 791static int xennet_get_responses(struct netfront_queue *queue,
731 struct netfront_rx_info *rinfo, RING_IDX rp, 792 struct netfront_rx_info *rinfo, RING_IDX rp,
732 struct sk_buff_head *list) 793 struct sk_buff_head *list)
733{ 794{
734 struct xen_netif_rx_response *rx = &rinfo->rx; 795 struct xen_netif_rx_response *rx = &rinfo->rx;
735 struct xen_netif_extra_info *extras = rinfo->extras; 796 struct xen_netif_extra_info *extras = rinfo->extras;
736 struct device *dev = &np->netdev->dev; 797 struct device *dev = &queue->info->netdev->dev;
737 RING_IDX cons = np->rx.rsp_cons; 798 RING_IDX cons = queue->rx.rsp_cons;
738 struct sk_buff *skb = xennet_get_rx_skb(np, cons); 799 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
739 grant_ref_t ref = xennet_get_rx_ref(np, cons); 800 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
740 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); 801 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
741 int slots = 1; 802 int slots = 1;
742 int err = 0; 803 int err = 0;
743 unsigned long ret; 804 unsigned long ret;
744 805
745 if (rx->flags & XEN_NETRXF_extra_info) { 806 if (rx->flags & XEN_NETRXF_extra_info) {
746 err = xennet_get_extras(np, extras, rp); 807 err = xennet_get_extras(queue, extras, rp);
747 cons = np->rx.rsp_cons; 808 cons = queue->rx.rsp_cons;
748 } 809 }
749 810
750 for (;;) { 811 for (;;) {
@@ -753,7 +814,7 @@ static int xennet_get_responses(struct netfront_info *np,
753 if (net_ratelimit()) 814 if (net_ratelimit())
754 dev_warn(dev, "rx->offset: %x, size: %u\n", 815 dev_warn(dev, "rx->offset: %x, size: %u\n",
755 rx->offset, rx->status); 816 rx->offset, rx->status);
756 xennet_move_rx_slot(np, skb, ref); 817 xennet_move_rx_slot(queue, skb, ref);
757 err = -EINVAL; 818 err = -EINVAL;
758 goto next; 819 goto next;
759 } 820 }
@@ -774,7 +835,7 @@ static int xennet_get_responses(struct netfront_info *np,
774 ret = gnttab_end_foreign_access_ref(ref, 0); 835 ret = gnttab_end_foreign_access_ref(ref, 0);
775 BUG_ON(!ret); 836 BUG_ON(!ret);
776 837
777 gnttab_release_grant_reference(&np->gref_rx_head, ref); 838 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
778 839
779 __skb_queue_tail(list, skb); 840 __skb_queue_tail(list, skb);
780 841
@@ -789,9 +850,9 @@ next:
789 break; 850 break;
790 } 851 }
791 852
792 rx = RING_GET_RESPONSE(&np->rx, cons + slots); 853 rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
793 skb = xennet_get_rx_skb(np, cons + slots); 854 skb = xennet_get_rx_skb(queue, cons + slots);
794 ref = xennet_get_rx_ref(np, cons + slots); 855 ref = xennet_get_rx_ref(queue, cons + slots);
795 slots++; 856 slots++;
796 } 857 }
797 858
@@ -802,7 +863,7 @@ next:
802 } 863 }
803 864
804 if (unlikely(err)) 865 if (unlikely(err))
805 np->rx.rsp_cons = cons + slots; 866 queue->rx.rsp_cons = cons + slots;
806 867
807 return err; 868 return err;
808} 869}
@@ -836,17 +897,17 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
836 return 0; 897 return 0;
837} 898}
838 899
839static RING_IDX xennet_fill_frags(struct netfront_info *np, 900static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
840 struct sk_buff *skb, 901 struct sk_buff *skb,
841 struct sk_buff_head *list) 902 struct sk_buff_head *list)
842{ 903{
843 struct skb_shared_info *shinfo = skb_shinfo(skb); 904 struct skb_shared_info *shinfo = skb_shinfo(skb);
844 RING_IDX cons = np->rx.rsp_cons; 905 RING_IDX cons = queue->rx.rsp_cons;
845 struct sk_buff *nskb; 906 struct sk_buff *nskb;
846 907
847 while ((nskb = __skb_dequeue(list))) { 908 while ((nskb = __skb_dequeue(list))) {
848 struct xen_netif_rx_response *rx = 909 struct xen_netif_rx_response *rx =
849 RING_GET_RESPONSE(&np->rx, ++cons); 910 RING_GET_RESPONSE(&queue->rx, ++cons);
850 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 911 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
851 912
852 if (shinfo->nr_frags == MAX_SKB_FRAGS) { 913 if (shinfo->nr_frags == MAX_SKB_FRAGS) {
@@ -879,7 +940,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
879 */ 940 */
880 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 941 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
881 struct netfront_info *np = netdev_priv(dev); 942 struct netfront_info *np = netdev_priv(dev);
882 np->rx_gso_checksum_fixup++; 943 atomic_inc(&np->rx_gso_checksum_fixup);
883 skb->ip_summed = CHECKSUM_PARTIAL; 944 skb->ip_summed = CHECKSUM_PARTIAL;
884 recalculate_partial_csum = true; 945 recalculate_partial_csum = true;
885 } 946 }
@@ -891,11 +952,10 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
891 return skb_checksum_setup(skb, recalculate_partial_csum); 952 return skb_checksum_setup(skb, recalculate_partial_csum);
892} 953}
893 954
894static int handle_incoming_queue(struct net_device *dev, 955static int handle_incoming_queue(struct netfront_queue *queue,
895 struct sk_buff_head *rxq) 956 struct sk_buff_head *rxq)
896{ 957{
897 struct netfront_info *np = netdev_priv(dev); 958 struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
898 struct netfront_stats *stats = this_cpu_ptr(np->stats);
899 int packets_dropped = 0; 959 int packets_dropped = 0;
900 struct sk_buff *skb; 960 struct sk_buff *skb;
901 961
@@ -906,13 +966,13 @@ static int handle_incoming_queue(struct net_device *dev,
906 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 966 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
907 967
908 /* Ethernet work: Delayed to here as it peeks the header. */ 968 /* Ethernet work: Delayed to here as it peeks the header. */
909 skb->protocol = eth_type_trans(skb, dev); 969 skb->protocol = eth_type_trans(skb, queue->info->netdev);
910 skb_reset_network_header(skb); 970 skb_reset_network_header(skb);
911 971
912 if (checksum_setup(dev, skb)) { 972 if (checksum_setup(queue->info->netdev, skb)) {
913 kfree_skb(skb); 973 kfree_skb(skb);
914 packets_dropped++; 974 packets_dropped++;
915 dev->stats.rx_errors++; 975 queue->info->netdev->stats.rx_errors++;
916 continue; 976 continue;
917 } 977 }
918 978
@@ -922,7 +982,7 @@ static int handle_incoming_queue(struct net_device *dev,
922 u64_stats_update_end(&stats->syncp); 982 u64_stats_update_end(&stats->syncp);
923 983
924 /* Pass it up. */ 984 /* Pass it up. */
925 napi_gro_receive(&np->napi, skb); 985 napi_gro_receive(&queue->napi, skb);
926 } 986 }
927 987
928 return packets_dropped; 988 return packets_dropped;
@@ -930,8 +990,8 @@ static int handle_incoming_queue(struct net_device *dev,
930 990
931static int xennet_poll(struct napi_struct *napi, int budget) 991static int xennet_poll(struct napi_struct *napi, int budget)
932{ 992{
933 struct netfront_info *np = container_of(napi, struct netfront_info, napi); 993 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
934 struct net_device *dev = np->netdev; 994 struct net_device *dev = queue->info->netdev;
935 struct sk_buff *skb; 995 struct sk_buff *skb;
936 struct netfront_rx_info rinfo; 996 struct netfront_rx_info rinfo;
937 struct xen_netif_rx_response *rx = &rinfo.rx; 997 struct xen_netif_rx_response *rx = &rinfo.rx;
@@ -944,29 +1004,29 @@ static int xennet_poll(struct napi_struct *napi, int budget)
944 unsigned long flags; 1004 unsigned long flags;
945 int err; 1005 int err;
946 1006
947 spin_lock(&np->rx_lock); 1007 spin_lock(&queue->rx_lock);
948 1008
949 skb_queue_head_init(&rxq); 1009 skb_queue_head_init(&rxq);
950 skb_queue_head_init(&errq); 1010 skb_queue_head_init(&errq);
951 skb_queue_head_init(&tmpq); 1011 skb_queue_head_init(&tmpq);
952 1012
953 rp = np->rx.sring->rsp_prod; 1013 rp = queue->rx.sring->rsp_prod;
954 rmb(); /* Ensure we see queued responses up to 'rp'. */ 1014 rmb(); /* Ensure we see queued responses up to 'rp'. */
955 1015
956 i = np->rx.rsp_cons; 1016 i = queue->rx.rsp_cons;
957 work_done = 0; 1017 work_done = 0;
958 while ((i != rp) && (work_done < budget)) { 1018 while ((i != rp) && (work_done < budget)) {
959 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); 1019 memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
960 memset(extras, 0, sizeof(rinfo.extras)); 1020 memset(extras, 0, sizeof(rinfo.extras));
961 1021
962 err = xennet_get_responses(np, &rinfo, rp, &tmpq); 1022 err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
963 1023
964 if (unlikely(err)) { 1024 if (unlikely(err)) {
965err: 1025err:
966 while ((skb = __skb_dequeue(&tmpq))) 1026 while ((skb = __skb_dequeue(&tmpq)))
967 __skb_queue_tail(&errq, skb); 1027 __skb_queue_tail(&errq, skb);
968 dev->stats.rx_errors++; 1028 dev->stats.rx_errors++;
969 i = np->rx.rsp_cons; 1029 i = queue->rx.rsp_cons;
970 continue; 1030 continue;
971 } 1031 }
972 1032
@@ -978,7 +1038,7 @@ err:
978 1038
979 if (unlikely(xennet_set_skb_gso(skb, gso))) { 1039 if (unlikely(xennet_set_skb_gso(skb, gso))) {
980 __skb_queue_head(&tmpq, skb); 1040 __skb_queue_head(&tmpq, skb);
981 np->rx.rsp_cons += skb_queue_len(&tmpq); 1041 queue->rx.rsp_cons += skb_queue_len(&tmpq);
982 goto err; 1042 goto err;
983 } 1043 }
984 } 1044 }
@@ -992,7 +1052,7 @@ err:
992 skb->data_len = rx->status; 1052 skb->data_len = rx->status;
993 skb->len += rx->status; 1053 skb->len += rx->status;
994 1054
995 i = xennet_fill_frags(np, skb, &tmpq); 1055 i = xennet_fill_frags(queue, skb, &tmpq);
996 1056
997 if (rx->flags & XEN_NETRXF_csum_blank) 1057 if (rx->flags & XEN_NETRXF_csum_blank)
998 skb->ip_summed = CHECKSUM_PARTIAL; 1058 skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1001,22 +1061,22 @@ err:
1001 1061
1002 __skb_queue_tail(&rxq, skb); 1062 __skb_queue_tail(&rxq, skb);
1003 1063
1004 np->rx.rsp_cons = ++i; 1064 queue->rx.rsp_cons = ++i;
1005 work_done++; 1065 work_done++;
1006 } 1066 }
1007 1067
1008 __skb_queue_purge(&errq); 1068 __skb_queue_purge(&errq);
1009 1069
1010 work_done -= handle_incoming_queue(dev, &rxq); 1070 work_done -= handle_incoming_queue(queue, &rxq);
1011 1071
1012 /* If we get a callback with very few responses, reduce fill target. */ 1072 /* If we get a callback with very few responses, reduce fill target. */
1013 /* NB. Note exponential increase, linear decrease. */ 1073 /* NB. Note exponential increase, linear decrease. */
1014 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > 1074 if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
1015 ((3*np->rx_target) / 4)) && 1075 ((3*queue->rx_target) / 4)) &&
1016 (--np->rx_target < np->rx_min_target)) 1076 (--queue->rx_target < queue->rx_min_target))
1017 np->rx_target = np->rx_min_target; 1077 queue->rx_target = queue->rx_min_target;
1018 1078
1019 xennet_alloc_rx_buffers(dev); 1079 xennet_alloc_rx_buffers(queue);
1020 1080
1021 if (work_done < budget) { 1081 if (work_done < budget) {
1022 int more_to_do = 0; 1082 int more_to_do = 0;
@@ -1025,14 +1085,14 @@ err:
1025 1085
1026 local_irq_save(flags); 1086 local_irq_save(flags);
1027 1087
1028 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); 1088 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1029 if (!more_to_do) 1089 if (!more_to_do)
1030 __napi_complete(napi); 1090 __napi_complete(napi);
1031 1091
1032 local_irq_restore(flags); 1092 local_irq_restore(flags);
1033 } 1093 }
1034 1094
1035 spin_unlock(&np->rx_lock); 1095 spin_unlock(&queue->rx_lock);
1036 1096
1037 return work_done; 1097 return work_done;
1038} 1098}
@@ -1080,43 +1140,43 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1080 return tot; 1140 return tot;
1081} 1141}
1082 1142
1083static void xennet_release_tx_bufs(struct netfront_info *np) 1143static void xennet_release_tx_bufs(struct netfront_queue *queue)
1084{ 1144{
1085 struct sk_buff *skb; 1145 struct sk_buff *skb;
1086 int i; 1146 int i;
1087 1147
1088 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1148 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1089 /* Skip over entries which are actually freelist references */ 1149 /* Skip over entries which are actually freelist references */
1090 if (skb_entry_is_link(&np->tx_skbs[i])) 1150 if (skb_entry_is_link(&queue->tx_skbs[i]))
1091 continue; 1151 continue;
1092 1152
1093 skb = np->tx_skbs[i].skb; 1153 skb = queue->tx_skbs[i].skb;
1094 get_page(np->grant_tx_page[i]); 1154 get_page(queue->grant_tx_page[i]);
1095 gnttab_end_foreign_access(np->grant_tx_ref[i], 1155 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1096 GNTMAP_readonly, 1156 GNTMAP_readonly,
1097 (unsigned long)page_address(np->grant_tx_page[i])); 1157 (unsigned long)page_address(queue->grant_tx_page[i]));
1098 np->grant_tx_page[i] = NULL; 1158 queue->grant_tx_page[i] = NULL;
1099 np->grant_tx_ref[i] = GRANT_INVALID_REF; 1159 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1100 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); 1160 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1101 dev_kfree_skb_irq(skb); 1161 dev_kfree_skb_irq(skb);
1102 } 1162 }
1103} 1163}
1104 1164
1105static void xennet_release_rx_bufs(struct netfront_info *np) 1165static void xennet_release_rx_bufs(struct netfront_queue *queue)
1106{ 1166{
1107 int id, ref; 1167 int id, ref;
1108 1168
1109 spin_lock_bh(&np->rx_lock); 1169 spin_lock_bh(&queue->rx_lock);
1110 1170
1111 for (id = 0; id < NET_RX_RING_SIZE; id++) { 1171 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1112 struct sk_buff *skb; 1172 struct sk_buff *skb;
1113 struct page *page; 1173 struct page *page;
1114 1174
1115 skb = np->rx_skbs[id]; 1175 skb = queue->rx_skbs[id];
1116 if (!skb) 1176 if (!skb)
1117 continue; 1177 continue;
1118 1178
1119 ref = np->grant_rx_ref[id]; 1179 ref = queue->grant_rx_ref[id];
1120 if (ref == GRANT_INVALID_REF) 1180 if (ref == GRANT_INVALID_REF)
1121 continue; 1181 continue;
1122 1182
@@ -1128,21 +1188,28 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
1128 get_page(page); 1188 get_page(page);
1129 gnttab_end_foreign_access(ref, 0, 1189 gnttab_end_foreign_access(ref, 0,
1130 (unsigned long)page_address(page)); 1190 (unsigned long)page_address(page));
1131 np->grant_rx_ref[id] = GRANT_INVALID_REF; 1191 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1132 1192
1133 kfree_skb(skb); 1193 kfree_skb(skb);
1134 } 1194 }
1135 1195
1136 spin_unlock_bh(&np->rx_lock); 1196 spin_unlock_bh(&queue->rx_lock);
1137} 1197}
1138 1198
1139static void xennet_uninit(struct net_device *dev) 1199static void xennet_uninit(struct net_device *dev)
1140{ 1200{
1141 struct netfront_info *np = netdev_priv(dev); 1201 struct netfront_info *np = netdev_priv(dev);
1142 xennet_release_tx_bufs(np); 1202 unsigned int num_queues = dev->real_num_tx_queues;
1143 xennet_release_rx_bufs(np); 1203 struct netfront_queue *queue;
1144 gnttab_free_grant_references(np->gref_tx_head); 1204 unsigned int i;
1145 gnttab_free_grant_references(np->gref_rx_head); 1205
1206 for (i = 0; i < num_queues; ++i) {
1207 queue = &np->queues[i];
1208 xennet_release_tx_bufs(queue);
1209 xennet_release_rx_bufs(queue);
1210 gnttab_free_grant_references(queue->gref_tx_head);
1211 gnttab_free_grant_references(queue->gref_rx_head);
1212 }
1146} 1213}
1147 1214
1148static netdev_features_t xennet_fix_features(struct net_device *dev, 1215static netdev_features_t xennet_fix_features(struct net_device *dev,
@@ -1203,25 +1270,24 @@ static int xennet_set_features(struct net_device *dev,
1203 1270
1204static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) 1271static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1205{ 1272{
1206 struct netfront_info *np = dev_id; 1273 struct netfront_queue *queue = dev_id;
1207 struct net_device *dev = np->netdev;
1208 unsigned long flags; 1274 unsigned long flags;
1209 1275
1210 spin_lock_irqsave(&np->tx_lock, flags); 1276 spin_lock_irqsave(&queue->tx_lock, flags);
1211 xennet_tx_buf_gc(dev); 1277 xennet_tx_buf_gc(queue);
1212 spin_unlock_irqrestore(&np->tx_lock, flags); 1278 spin_unlock_irqrestore(&queue->tx_lock, flags);
1213 1279
1214 return IRQ_HANDLED; 1280 return IRQ_HANDLED;
1215} 1281}
1216 1282
1217static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) 1283static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1218{ 1284{
1219 struct netfront_info *np = dev_id; 1285 struct netfront_queue *queue = dev_id;
1220 struct net_device *dev = np->netdev; 1286 struct net_device *dev = queue->info->netdev;
1221 1287
1222 if (likely(netif_carrier_ok(dev) && 1288 if (likely(netif_carrier_ok(dev) &&
1223 RING_HAS_UNCONSUMED_RESPONSES(&np->rx))) 1289 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1224 napi_schedule(&np->napi); 1290 napi_schedule(&queue->napi);
1225 1291
1226 return IRQ_HANDLED; 1292 return IRQ_HANDLED;
1227} 1293}
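With the interrupt handlers now taking a netfront_queue as dev_id, each queue can own its own split event channels, named per the DEVNAME-qN-tx/-rx scheme declared earlier in the file. A sketch of binding one queue's pair (example_bind_split_irqs() is illustrative; the driver does the equivalent in its ring-setup path):

#include <linux/interrupt.h>
#include <xen/events.h>

static int example_bind_split_irqs(struct netfront_queue *queue)
{
	int err;

	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_tx_interrupt, 0,
					queue->tx_irq_name, queue);
	if (err < 0)
		return err;
	queue->tx_irq = err;

	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
					xennet_rx_interrupt, 0,
					queue->rx_irq_name, queue);
	if (err < 0) {
		/* Unwind the TX binding if the RX one fails. */
		unbind_from_irqhandler(queue->tx_irq, queue);
		return err;
	}
	queue->rx_irq = err;
	return 0;
}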
@@ -1236,7 +1302,12 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1236#ifdef CONFIG_NET_POLL_CONTROLLER 1302#ifdef CONFIG_NET_POLL_CONTROLLER
1237static void xennet_poll_controller(struct net_device *dev) 1303static void xennet_poll_controller(struct net_device *dev)
1238{ 1304{
1239 xennet_interrupt(0, dev); 1305 /* Poll each queue */
1306 struct netfront_info *info = netdev_priv(dev);
1307 unsigned int num_queues = dev->real_num_tx_queues;
1308 unsigned int i;
1309 for (i = 0; i < num_queues; ++i)
1310 xennet_interrupt(0, &info->queues[i]);
1240} 1311}
1241#endif 1312#endif
1242 1313
@@ -1251,6 +1322,7 @@ static const struct net_device_ops xennet_netdev_ops = {
1251 .ndo_validate_addr = eth_validate_addr, 1322 .ndo_validate_addr = eth_validate_addr,
1252 .ndo_fix_features = xennet_fix_features, 1323 .ndo_fix_features = xennet_fix_features,
1253 .ndo_set_features = xennet_set_features, 1324 .ndo_set_features = xennet_set_features,
1325 .ndo_select_queue = xennet_select_queue,
1254#ifdef CONFIG_NET_POLL_CONTROLLER 1326#ifdef CONFIG_NET_POLL_CONTROLLER
1255 .ndo_poll_controller = xennet_poll_controller, 1327 .ndo_poll_controller = xennet_poll_controller,
1256#endif 1328#endif
@@ -1258,66 +1330,30 @@ static const struct net_device_ops xennet_netdev_ops = {
 
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
-        int i, err;
+        int err;
         struct net_device *netdev;
         struct netfront_info *np;
 
-        netdev = alloc_etherdev(sizeof(struct netfront_info));
+        netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
         if (!netdev)
                 return ERR_PTR(-ENOMEM);
 
         np = netdev_priv(netdev);
         np->xbdev = dev;
 
-        spin_lock_init(&np->tx_lock);
-        spin_lock_init(&np->rx_lock);
-
-        skb_queue_head_init(&np->rx_batch);
-        np->rx_target     = RX_DFL_MIN_TARGET;
-        np->rx_min_target = RX_DFL_MIN_TARGET;
-        np->rx_max_target = RX_MAX_TARGET;
-
-        init_timer(&np->rx_refill_timer);
-        np->rx_refill_timer.data = (unsigned long)netdev;
-        np->rx_refill_timer.function = rx_refill_timeout;
+        /* No need to use rtnl_lock() before the call below as it
+         * happens before register_netdev().
+         */
+        netif_set_real_num_tx_queues(netdev, 0);
+        np->queues = NULL;
 
         err = -ENOMEM;
         np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
         if (np->stats == NULL)
                 goto exit;
 
-        /* Initialise tx_skbs as a free chain containing every entry. */
-        np->tx_skb_freelist = 0;
-        for (i = 0; i < NET_TX_RING_SIZE; i++) {
-                skb_entry_set_link(&np->tx_skbs[i], i+1);
-                np->grant_tx_ref[i] = GRANT_INVALID_REF;
-                np->grant_tx_page[i] = NULL;
-        }
-
-        /* Clear out rx_skbs */
-        for (i = 0; i < NET_RX_RING_SIZE; i++) {
-                np->rx_skbs[i] = NULL;
-                np->grant_rx_ref[i] = GRANT_INVALID_REF;
-        }
-
-        /* A grant for every tx ring slot */
-        if (gnttab_alloc_grant_references(TX_MAX_TARGET,
-                                          &np->gref_tx_head) < 0) {
-                pr_alert("can't alloc tx grant refs\n");
-                err = -ENOMEM;
-                goto exit_free_stats;
-        }
-        /* A grant for every rx ring slot */
-        if (gnttab_alloc_grant_references(RX_MAX_TARGET,
-                                          &np->gref_rx_head) < 0) {
-                pr_alert("can't alloc rx grant refs\n");
-                err = -ENOMEM;
-                goto exit_free_tx;
-        }
-
         netdev->netdev_ops = &xennet_netdev_ops;
 
-        netif_napi_add(netdev, &np->napi, xennet_poll, 64);
         netdev->features   = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                              NETIF_F_GSO_ROBUST;
         netdev->hw_features = NETIF_F_SG |
@@ -1332,7 +1368,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
          */
         netdev->features |= netdev->hw_features;
 
-        SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+        netdev->ethtool_ops = &xennet_ethtool_ops;
         SET_NETDEV_DEV(netdev, &dev->dev);
 
         netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
@@ -1343,10 +1379,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 
         return netdev;
 
- exit_free_tx:
-        gnttab_free_grant_references(np->gref_tx_head);
- exit_free_stats:
-        free_percpu(np->stats);
  exit:
         free_netdev(netdev);
         return ERR_PTR(err);
@@ -1404,30 +1436,37 @@ static void xennet_end_access(int ref, void *page)
 
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
-        /* Stop old i/f to prevent errors whilst we rebuild the state. */
-        spin_lock_bh(&info->rx_lock);
-        spin_lock_irq(&info->tx_lock);
-        netif_carrier_off(info->netdev);
-        spin_unlock_irq(&info->tx_lock);
-        spin_unlock_bh(&info->rx_lock);
-
-        if (info->tx_irq && (info->tx_irq == info->rx_irq))
-                unbind_from_irqhandler(info->tx_irq, info);
-        if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
-                unbind_from_irqhandler(info->tx_irq, info);
-                unbind_from_irqhandler(info->rx_irq, info);
-        }
-        info->tx_evtchn = info->rx_evtchn = 0;
-        info->tx_irq = info->rx_irq = 0;
+        unsigned int i = 0;
+        struct netfront_queue *queue = NULL;
+        unsigned int num_queues = info->netdev->real_num_tx_queues;
+
+        for (i = 0; i < num_queues; ++i) {
+                queue = &info->queues[i];
+                /* Stop old i/f to prevent errors whilst we rebuild the state. */
+                spin_lock_bh(&queue->rx_lock);
+                spin_lock_irq(&queue->tx_lock);
+                netif_carrier_off(queue->info->netdev);
+                spin_unlock_irq(&queue->tx_lock);
+                spin_unlock_bh(&queue->rx_lock);
+
+                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
+                        unbind_from_irqhandler(queue->tx_irq, queue);
+                if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
+                        unbind_from_irqhandler(queue->tx_irq, queue);
+                        unbind_from_irqhandler(queue->rx_irq, queue);
+                }
+                queue->tx_evtchn = queue->rx_evtchn = 0;
+                queue->tx_irq = queue->rx_irq = 0;
 
-        /* End access and free the pages */
-        xennet_end_access(info->tx_ring_ref, info->tx.sring);
-        xennet_end_access(info->rx_ring_ref, info->rx.sring);
-
-        info->tx_ring_ref = GRANT_INVALID_REF;
-        info->rx_ring_ref = GRANT_INVALID_REF;
-        info->tx.sring = NULL;
-        info->rx.sring = NULL;
+                /* End access and free the pages */
+                xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
+                xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
+
+                queue->tx_ring_ref = GRANT_INVALID_REF;
+                queue->rx_ring_ref = GRANT_INVALID_REF;
+                queue->tx.sring = NULL;
+                queue->rx.sring = NULL;
+        }
 }
 
 /**
@@ -1468,100 +1506,86 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
         return 0;
 }
 
-static int setup_netfront_single(struct netfront_info *info)
+static int setup_netfront_single(struct netfront_queue *queue)
 {
         int err;
 
-        err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+        err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
         if (err < 0)
                 goto fail;
 
-        err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+        err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
                                         xennet_interrupt,
-                                        0, info->netdev->name, info);
+                                        0, queue->info->netdev->name, queue);
         if (err < 0)
                 goto bind_fail;
-        info->rx_evtchn = info->tx_evtchn;
-        info->rx_irq = info->tx_irq = err;
+        queue->rx_evtchn = queue->tx_evtchn;
+        queue->rx_irq = queue->tx_irq = err;
 
         return 0;
 
 bind_fail:
-        xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
-        info->tx_evtchn = 0;
+        xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+        queue->tx_evtchn = 0;
 fail:
         return err;
 }
 
-static int setup_netfront_split(struct netfront_info *info)
+static int setup_netfront_split(struct netfront_queue *queue)
 {
         int err;
 
-        err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+        err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
         if (err < 0)
                 goto fail;
-        err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
+        err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
         if (err < 0)
                 goto alloc_rx_evtchn_fail;
 
-        snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
-                 "%s-tx", info->netdev->name);
-        err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+        snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+                 "%s-tx", queue->name);
+        err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
                                         xennet_tx_interrupt,
-                                        0, info->tx_irq_name, info);
+                                        0, queue->tx_irq_name, queue);
         if (err < 0)
                 goto bind_tx_fail;
-        info->tx_irq = err;
+        queue->tx_irq = err;
 
-        snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
-                 "%s-rx", info->netdev->name);
-        err = bind_evtchn_to_irqhandler(info->rx_evtchn,
+        snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+                 "%s-rx", queue->name);
+        err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
                                         xennet_rx_interrupt,
-                                        0, info->rx_irq_name, info);
+                                        0, queue->rx_irq_name, queue);
         if (err < 0)
                 goto bind_rx_fail;
-        info->rx_irq = err;
+        queue->rx_irq = err;
 
         return 0;
 
 bind_rx_fail:
-        unbind_from_irqhandler(info->tx_irq, info);
-        info->tx_irq = 0;
+        unbind_from_irqhandler(queue->tx_irq, queue);
+        queue->tx_irq = 0;
 bind_tx_fail:
-        xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
-        info->rx_evtchn = 0;
+        xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
+        queue->rx_evtchn = 0;
 alloc_rx_evtchn_fail:
-        xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
-        info->tx_evtchn = 0;
+        xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+        queue->tx_evtchn = 0;
 fail:
         return err;
 }
 
-static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+static int setup_netfront(struct xenbus_device *dev,
+                        struct netfront_queue *queue, unsigned int feature_split_evtchn)
 {
         struct xen_netif_tx_sring *txs;
         struct xen_netif_rx_sring *rxs;
         int err;
-        struct net_device *netdev = info->netdev;
-        unsigned int feature_split_evtchn;
 
-        info->tx_ring_ref = GRANT_INVALID_REF;
-        info->rx_ring_ref = GRANT_INVALID_REF;
-        info->rx.sring = NULL;
-        info->tx.sring = NULL;
-        netdev->irq = 0;
-
-        err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-                           "feature-split-event-channels", "%u",
-                           &feature_split_evtchn);
-        if (err < 0)
-                feature_split_evtchn = 0;
-
-        err = xen_net_read_mac(dev, netdev->dev_addr);
-        if (err) {
-                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-                goto fail;
-        }
+        queue->tx_ring_ref = GRANT_INVALID_REF;
+        queue->rx_ring_ref = GRANT_INVALID_REF;
+        queue->rx.sring = NULL;
+        queue->tx.sring = NULL;
 
         txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
         if (!txs) {
@@ -1570,13 +1594,13 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
                 goto fail;
         }
         SHARED_RING_INIT(txs);
-        FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+        FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
 
         err = xenbus_grant_ring(dev, virt_to_mfn(txs));
         if (err < 0)
                 goto grant_tx_ring_fail;
+        queue->tx_ring_ref = err;
 
-        info->tx_ring_ref = err;
         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
         if (!rxs) {
                 err = -ENOMEM;
@@ -1584,21 +1608,21 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
                 goto alloc_rx_ring_fail;
         }
         SHARED_RING_INIT(rxs);
-        FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+        FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
 
         err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
         if (err < 0)
                 goto grant_rx_ring_fail;
-        info->rx_ring_ref = err;
+        queue->rx_ring_ref = err;
 
         if (feature_split_evtchn)
-                err = setup_netfront_split(info);
+                err = setup_netfront_split(queue);
         /* setup single event channel if
          *  a) feature-split-event-channels == 0
          *  b) feature-split-event-channels == 1 but failed to setup
          */
         if (!feature_split_evtchn || (feature_split_evtchn && err))
-                err = setup_netfront_single(info);
+                err = setup_netfront_single(queue);
 
         if (err)
                 goto alloc_evtchn_fail;
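A small readability note on the fallback above: !feature_split_evtchn || (feature_split_evtchn && err) reduces to !feature_split_evtchn || err, i.e. a single shared event channel is used either when the backend never offered split channels or when it offered them but the split setup failed.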
@@ -1609,17 +1633,163 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
          * granted pages because backend is not accessing it at this point.
          */
 alloc_evtchn_fail:
-        gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
+        gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
 grant_rx_ring_fail:
         free_page((unsigned long)rxs);
 alloc_rx_ring_fail:
-        gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
+        gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
 grant_tx_ring_fail:
         free_page((unsigned long)txs);
 fail:
         return err;
 }
 
+/* Queue-specific initialisation
+ * This used to be done in xennet_create_dev() but must now
+ * be run per-queue.
+ */
+static int xennet_init_queue(struct netfront_queue *queue)
+{
+        unsigned short i;
+        int err = 0;
+
+        spin_lock_init(&queue->tx_lock);
+        spin_lock_init(&queue->rx_lock);
+
+        skb_queue_head_init(&queue->rx_batch);
+        queue->rx_target     = RX_DFL_MIN_TARGET;
+        queue->rx_min_target = RX_DFL_MIN_TARGET;
+        queue->rx_max_target = RX_MAX_TARGET;
+
+        init_timer(&queue->rx_refill_timer);
+        queue->rx_refill_timer.data = (unsigned long)queue;
+        queue->rx_refill_timer.function = rx_refill_timeout;
+
+        snprintf(queue->name, sizeof(queue->name), "%s-q%u",
+                 queue->info->netdev->name, queue->id);
+
+        /* Initialise tx_skbs as a free chain containing every entry. */
+        queue->tx_skb_freelist = 0;
+        for (i = 0; i < NET_TX_RING_SIZE; i++) {
+                skb_entry_set_link(&queue->tx_skbs[i], i+1);
+                queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+                queue->grant_tx_page[i] = NULL;
+        }
+
+        /* Clear out rx_skbs */
+        for (i = 0; i < NET_RX_RING_SIZE; i++) {
+                queue->rx_skbs[i] = NULL;
+                queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+        }
+
+        /* A grant for every tx ring slot */
+        if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+                                          &queue->gref_tx_head) < 0) {
+                pr_alert("can't alloc tx grant refs\n");
+                err = -ENOMEM;
+                goto exit;
+        }
+
+        /* A grant for every rx ring slot */
+        if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+                                          &queue->gref_rx_head) < 0) {
+                pr_alert("can't alloc rx grant refs\n");
+                err = -ENOMEM;
+                goto exit_free_tx;
+        }
+
+        netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
+
+        return 0;
+
+ exit_free_tx:
+        gnttab_free_grant_references(queue->gref_tx_head);
+ exit:
+        return err;
+}
+
+static int write_queue_xenstore_keys(struct netfront_queue *queue,
+                           struct xenbus_transaction *xbt, int write_hierarchical)
+{
+        /* Write the queue-specific keys into XenStore in the traditional
+         * way for a single queue, or in a queue subkeys for multiple
+         * queues.
+         */
+        struct xenbus_device *dev = queue->info->xbdev;
+        int err;
+        const char *message;
+        char *path;
+        size_t pathsize;
+
+        /* Choose the correct place to write the keys */
+        if (write_hierarchical) {
+                pathsize = strlen(dev->nodename) + 10;
+                path = kzalloc(pathsize, GFP_KERNEL);
+                if (!path) {
+                        err = -ENOMEM;
+                        message = "out of memory while writing ring references";
+                        goto error;
+                }
+                snprintf(path, pathsize, "%s/queue-%u",
+                         dev->nodename, queue->id);
+        } else {
+                path = (char *)dev->nodename;
+        }
+
+        /* Write ring references */
+        err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
+                            queue->tx_ring_ref);
+        if (err) {
+                message = "writing tx-ring-ref";
+                goto error;
+        }
+
+        err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
+                            queue->rx_ring_ref);
+        if (err) {
+                message = "writing rx-ring-ref";
+                goto error;
+        }
+
+        /* Write event channels; taking into account both shared
+         * and split event channel scenarios.
+         */
+        if (queue->tx_evtchn == queue->rx_evtchn) {
+                /* Shared event channel */
+                err = xenbus_printf(*xbt, path,
+                                    "event-channel", "%u", queue->tx_evtchn);
+                if (err) {
+                        message = "writing event-channel";
+                        goto error;
+                }
+        } else {
+                /* Split event channels */
+                err = xenbus_printf(*xbt, path,
+                                    "event-channel-tx", "%u", queue->tx_evtchn);
+                if (err) {
+                        message = "writing event-channel-tx";
+                        goto error;
+                }
+
+                err = xenbus_printf(*xbt, path,
+                                    "event-channel-rx", "%u", queue->rx_evtchn);
+                if (err) {
+                        message = "writing event-channel-rx";
+                        goto error;
+                }
+        }
+
+        if (write_hierarchical)
+                kfree(path);
+        return 0;
+
+error:
+        if (write_hierarchical)
+                kfree(path);
+        xenbus_dev_fatal(dev, err, "%s", message);
+        return err;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
                            struct netfront_info *info)
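write_queue_xenstore_keys() gives the frontend two layouts for the same keys. With a single queue they are written directly under the device node, exactly what pre-multiqueue backends expect, so compatibility is preserved; with several queues each queue's keys move into a queue-<id> child. As an illustration (paths and values are assumed, not taken from this diff), a two-queue frontend at device/vif/0 using shared event channels would publish roughly:

    device/vif/0/multi-queue-num-queues = "2"
    device/vif/0/queue-0/tx-ring-ref    = "<grant-ref>"
    device/vif/0/queue-0/rx-ring-ref    = "<grant-ref>"
    device/vif/0/queue-0/event-channel  = "<port>"
    device/vif/0/queue-1/tx-ring-ref    = "<grant-ref>"
    device/vif/0/queue-1/rx-ring-ref    = "<grant-ref>"
    device/vif/0/queue-1/event-channel  = "<port>"

with event-channel-tx/event-channel-rx replacing event-channel when split channels are negotiated. The pathsize of strlen(dev->nodename) + 10 budgets exactly for "/queue-" (7 bytes), a two-digit queue id, and the terminating NUL.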
@@ -1627,11 +1797,83 @@ static int talk_to_netback(struct xenbus_device *dev,
         const char *message;
         struct xenbus_transaction xbt;
         int err;
+        unsigned int feature_split_evtchn;
+        unsigned int i = 0;
+        unsigned int max_queues = 0;
+        struct netfront_queue *queue = NULL;
+        unsigned int num_queues = 1;
 
-        /* Create shared ring, alloc event channel. */
-        err = setup_netfront(dev, info);
-        if (err)
+        info->netdev->irq = 0;
+
+        /* Check if backend supports multiple queues */
+        err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                           "multi-queue-max-queues", "%u", &max_queues);
+        if (err < 0)
+                max_queues = 1;
+        num_queues = min(max_queues, xennet_max_queues);
+
+        /* Check feature-split-event-channels */
+        err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                           "feature-split-event-channels", "%u",
+                           &feature_split_evtchn);
+        if (err < 0)
+                feature_split_evtchn = 0;
+
+        /* Read mac addr. */
+        err = xen_net_read_mac(dev, info->netdev->dev_addr);
+        if (err) {
+                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+                goto out;
+        }
+
+        /* Allocate array of queues */
+        info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
+        if (!info->queues) {
+                err = -ENOMEM;
                 goto out;
+        }
+        rtnl_lock();
+        netif_set_real_num_tx_queues(info->netdev, num_queues);
+        rtnl_unlock();
+
+        /* Create shared ring, alloc event channel -- for each queue */
+        for (i = 0; i < num_queues; ++i) {
+                queue = &info->queues[i];
+                queue->id = i;
+                queue->info = info;
+                err = xennet_init_queue(queue);
+                if (err) {
+                        /* xennet_init_queue() cleans up after itself on failure,
+                         * but we still have to clean up any previously initialised
+                         * queues. If i > 0, set num_queues to i, then goto
+                         * destroy_ring, which calls xennet_disconnect_backend()
+                         * to tidy up.
+                         */
+                        if (i > 0) {
+                                rtnl_lock();
+                                netif_set_real_num_tx_queues(info->netdev, i);
+                                rtnl_unlock();
+                                goto destroy_ring;
+                        } else {
+                                goto out;
+                        }
+                }
+                err = setup_netfront(dev, queue, feature_split_evtchn);
+                if (err) {
+                        /* As for xennet_init_queue(), setup_netfront() will tidy
+                         * up the current queue on error, but we need to clean up
+                         * those already allocated.
+                         */
+                        if (i > 0) {
+                                rtnl_lock();
+                                netif_set_real_num_tx_queues(info->netdev, i);
+                                rtnl_unlock();
+                                goto destroy_ring;
+                        } else {
+                                goto out;
+                        }
+                }
+        }
 
 again:
         err = xenbus_transaction_start(&xbt);
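The queue-count negotiation above is a plain minimum of what the two ends support: the backend advertises its ceiling in multi-queue-max-queues, the frontend caps it at xennet_max_queues, and the agreed number is written back as multi-queue-num-queues inside the transaction that follows. As a worked example (values assumed, not from this diff): a backend advertising 8 queues connected to a 4-VCPU guest, where xennet_max_queues defaults to num_online_cpus() = 4, yields num_queues = min(8, 4) = 4. If the backend does not publish the key at all, xenbus_scanf() returns an error, max_queues falls back to 1, and the device degrades to the traditional single-queue behaviour.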
@@ -1640,41 +1882,29 @@ again:
                 goto destroy_ring;
         }
 
-        err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
-                            info->tx_ring_ref);
-        if (err) {
-                message = "writing tx ring-ref";
-                goto abort_transaction;
-        }
-        err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
-                            info->rx_ring_ref);
-        if (err) {
-                message = "writing rx ring-ref";
-                goto abort_transaction;
-        }
-
-        if (info->tx_evtchn == info->rx_evtchn) {
-                err = xenbus_printf(xbt, dev->nodename,
-                                    "event-channel", "%u", info->tx_evtchn);
-                if (err) {
-                        message = "writing event-channel";
-                        goto abort_transaction;
-                }
+        if (num_queues == 1) {
+                err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
+                if (err)
+                        goto abort_transaction_no_dev_fatal;
         } else {
-                err = xenbus_printf(xbt, dev->nodename,
-                                    "event-channel-tx", "%u", info->tx_evtchn);
+                /* Write the number of queues */
+                err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
+                                    "%u", num_queues);
                 if (err) {
-                        message = "writing event-channel-tx";
-                        goto abort_transaction;
+                        message = "writing multi-queue-num-queues";
+                        goto abort_transaction_no_dev_fatal;
                 }
-                err = xenbus_printf(xbt, dev->nodename,
-                                    "event-channel-rx", "%u", info->rx_evtchn);
-                if (err) {
-                        message = "writing event-channel-rx";
-                        goto abort_transaction;
+
+                /* Write the keys for each queue */
+                for (i = 0; i < num_queues; ++i) {
+                        queue = &info->queues[i];
+                        err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
+                        if (err)
+                                goto abort_transaction_no_dev_fatal;
                 }
         }
 
+        /* The remaining keys are not queue-specific */
         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
                             1);
         if (err) {
@@ -1724,10 +1954,16 @@ again:
         return 0;
 
  abort_transaction:
-        xenbus_transaction_end(xbt, 1);
         xenbus_dev_fatal(dev, err, "%s", message);
+abort_transaction_no_dev_fatal:
+        xenbus_transaction_end(xbt, 1);
  destroy_ring:
         xennet_disconnect_backend(info);
+        kfree(info->queues);
+        info->queues = NULL;
+        rtnl_lock();
+        netif_set_real_num_tx_queues(info->netdev, 0);
+        rtnl_unlock();
  out:
         return err;
 }
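The old single abort_transaction label both ended the transaction and reported the failure. It is split here because write_queue_xenstore_keys() issues its own xenbus_dev_fatal() with a per-key message before returning, so its callers jump straight to abort_transaction_no_dev_fatal and only end the transaction, avoiding a duplicate and less specific error report; the remaining xenbus_printf() failures in talk_to_netback() still pass through the xenbus_dev_fatal() call first. Either ordering of the two calls works, since xenbus_dev_fatal() takes no transaction handle. The destroy_ring path also grows the multi-queue cleanup: the queue array is freed and the real TX queue count is dropped back to zero under rtnl_lock().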
@@ -1735,11 +1971,14 @@ again:
 static int xennet_connect(struct net_device *dev)
 {
         struct netfront_info *np = netdev_priv(dev);
+        unsigned int num_queues = 0;
         int i, requeue_idx, err;
         struct sk_buff *skb;
         grant_ref_t ref;
         struct xen_netif_rx_request *req;
         unsigned int feature_rx_copy;
+        unsigned int j = 0;
+        struct netfront_queue *queue = NULL;
 
         err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                            "feature-rx-copy", "%u", &feature_rx_copy);
@@ -1756,40 +1995,47 @@ static int xennet_connect(struct net_device *dev)
         if (err)
                 return err;
 
+        /* talk_to_netback() sets the correct number of queues */
+        num_queues = dev->real_num_tx_queues;
+
         rtnl_lock();
         netdev_update_features(dev);
         rtnl_unlock();
 
-        spin_lock_bh(&np->rx_lock);
-        spin_lock_irq(&np->tx_lock);
+        /* By now, the queue structures have been set up */
+        for (j = 0; j < num_queues; ++j) {
+                queue = &np->queues[j];
+                spin_lock_bh(&queue->rx_lock);
+                spin_lock_irq(&queue->tx_lock);
 
-        /* Step 1: Discard all pending TX packet fragments. */
-        xennet_release_tx_bufs(np);
+                /* Step 1: Discard all pending TX packet fragments. */
+                xennet_release_tx_bufs(queue);
 
-        /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-        for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-                skb_frag_t *frag;
-                const struct page *page;
-                if (!np->rx_skbs[i])
-                        continue;
+                /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+                for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+                        skb_frag_t *frag;
+                        const struct page *page;
+                        if (!queue->rx_skbs[i])
+                                continue;
 
-                skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
-                ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
-                req = RING_GET_REQUEST(&np->rx, requeue_idx);
+                        skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
+                        ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
+                        req = RING_GET_REQUEST(&queue->rx, requeue_idx);
 
-                frag = &skb_shinfo(skb)->frags[0];
-                page = skb_frag_page(frag);
-                gnttab_grant_foreign_access_ref(
-                        ref, np->xbdev->otherend_id,
-                        pfn_to_mfn(page_to_pfn(page)),
-                        0);
-                req->gref = ref;
-                req->id   = requeue_idx;
+                        frag = &skb_shinfo(skb)->frags[0];
+                        page = skb_frag_page(frag);
+                        gnttab_grant_foreign_access_ref(
+                                ref, queue->info->xbdev->otherend_id,
+                                pfn_to_mfn(page_to_pfn(page)),
+                                0);
+                        req->gref = ref;
+                        req->id   = requeue_idx;
 
-                requeue_idx++;
-        }
+                        requeue_idx++;
+                }
 
-        np->rx.req_prod_pvt = requeue_idx;
+                queue->rx.req_prod_pvt = requeue_idx;
+        }
 
         /*
          * Step 3: All public and private state should now be sane.  Get
@@ -1798,14 +2044,17 @@ static int xennet_connect(struct net_device *dev)
          * packets.
          */
         netif_carrier_on(np->netdev);
-        notify_remote_via_irq(np->tx_irq);
-        if (np->tx_irq != np->rx_irq)
-                notify_remote_via_irq(np->rx_irq);
-        xennet_tx_buf_gc(dev);
-        xennet_alloc_rx_buffers(dev);
+        for (j = 0; j < num_queues; ++j) {
+                queue = &np->queues[j];
+                notify_remote_via_irq(queue->tx_irq);
+                if (queue->tx_irq != queue->rx_irq)
+                        notify_remote_via_irq(queue->rx_irq);
+                xennet_tx_buf_gc(queue);
+                xennet_alloc_rx_buffers(queue);
 
-        spin_unlock_irq(&np->tx_lock);
-        spin_unlock_bh(&np->rx_lock);
+                spin_unlock_irq(&queue->tx_lock);
+                spin_unlock_bh(&queue->rx_lock);
+        }
 
         return 0;
 }
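One caveat in the rewritten xennet_connect(): the first per-queue loop takes every queue's rx_lock and tx_lock, and the Step 3 loop releases them, so all queues' locks are held simultaneously in between. Because plain spin_lock_irq()/spin_unlock_irq() are used, the first unlock re-enables interrupts while later queues' tx_locks are still held; keeping each queue's lock/unlock within a single loop iteration would avoid the nesting entirely.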
@@ -1878,7 +2127,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev,
         int i;
 
         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
-                data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
+                data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
 }
 
 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -1909,8 +2158,12 @@ static ssize_t show_rxbuf_min(struct device *dev,
 {
         struct net_device *netdev = to_net_dev(dev);
         struct netfront_info *info = netdev_priv(netdev);
+        unsigned int num_queues = netdev->real_num_tx_queues;
 
-        return sprintf(buf, "%u\n", info->rx_min_target);
+        if (num_queues)
+                return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
+        else
+                return sprintf(buf, "%u\n", RX_MIN_TARGET);
 }
 
 static ssize_t store_rxbuf_min(struct device *dev,
@@ -1919,8 +2172,11 @@ static ssize_t store_rxbuf_min(struct device *dev,
 {
         struct net_device *netdev = to_net_dev(dev);
         struct netfront_info *np = netdev_priv(netdev);
+        unsigned int num_queues = netdev->real_num_tx_queues;
         char *endp;
         unsigned long target;
+        unsigned int i;
+        struct netfront_queue *queue;
 
         if (!capable(CAP_NET_ADMIN))
                 return -EPERM;
@@ -1934,16 +2190,19 @@ static ssize_t store_rxbuf_min(struct device *dev,
         if (target > RX_MAX_TARGET)
                 target = RX_MAX_TARGET;
 
-        spin_lock_bh(&np->rx_lock);
-        if (target > np->rx_max_target)
-                np->rx_max_target = target;
-        np->rx_min_target = target;
-        if (target > np->rx_target)
-                np->rx_target = target;
+        for (i = 0; i < num_queues; ++i) {
+                queue = &np->queues[i];
+                spin_lock_bh(&queue->rx_lock);
+                if (target > queue->rx_max_target)
+                        queue->rx_max_target = target;
+                queue->rx_min_target = target;
+                if (target > queue->rx_target)
+                        queue->rx_target = target;
 
-        xennet_alloc_rx_buffers(netdev);
+                xennet_alloc_rx_buffers(queue);
 
-        spin_unlock_bh(&np->rx_lock);
+                spin_unlock_bh(&queue->rx_lock);
+        }
         return len;
 }
 
@@ -1952,8 +2211,12 @@ static ssize_t show_rxbuf_max(struct device *dev,
 {
         struct net_device *netdev = to_net_dev(dev);
         struct netfront_info *info = netdev_priv(netdev);
+        unsigned int num_queues = netdev->real_num_tx_queues;
 
-        return sprintf(buf, "%u\n", info->rx_max_target);
+        if (num_queues)
+                return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
+        else
+                return sprintf(buf, "%u\n", RX_MAX_TARGET);
 }
 
 static ssize_t store_rxbuf_max(struct device *dev,
@@ -1962,8 +2225,11 @@ static ssize_t store_rxbuf_max(struct device *dev,
 {
         struct net_device *netdev = to_net_dev(dev);
         struct netfront_info *np = netdev_priv(netdev);
+        unsigned int num_queues = netdev->real_num_tx_queues;
         char *endp;
         unsigned long target;
+        unsigned int i = 0;
+        struct netfront_queue *queue = NULL;
 
         if (!capable(CAP_NET_ADMIN))
                 return -EPERM;
@@ -1977,16 +2243,19 @@ static ssize_t store_rxbuf_max(struct device *dev,
         if (target > RX_MAX_TARGET)
                 target = RX_MAX_TARGET;
 
-        spin_lock_bh(&np->rx_lock);
-        if (target < np->rx_min_target)
-                np->rx_min_target = target;
-        np->rx_max_target = target;
-        if (target < np->rx_target)
-                np->rx_target = target;
+        for (i = 0; i < num_queues; ++i) {
+                queue = &np->queues[i];
+                spin_lock_bh(&queue->rx_lock);
+                if (target < queue->rx_min_target)
+                        queue->rx_min_target = target;
+                queue->rx_max_target = target;
+                if (target < queue->rx_target)
+                        queue->rx_target = target;
 
-        xennet_alloc_rx_buffers(netdev);
+                xennet_alloc_rx_buffers(queue);
 
-        spin_unlock_bh(&np->rx_lock);
+                spin_unlock_bh(&queue->rx_lock);
+        }
         return len;
 }
 
@@ -1995,8 +2264,12 @@ static ssize_t show_rxbuf_cur(struct device *dev,
 {
         struct net_device *netdev = to_net_dev(dev);
         struct netfront_info *info = netdev_priv(netdev);
+        unsigned int num_queues = netdev->real_num_tx_queues;
 
-        return sprintf(buf, "%u\n", info->rx_target);
+        if (num_queues)
+                return sprintf(buf, "%u\n", info->queues[0].rx_target);
+        else
+                return sprintf(buf, "0\n");
 }
 
 static struct device_attribute xennet_attrs[] = {
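The rxbuf_min/rxbuf_max/rxbuf_cur sysfs attributes keep their names and units (RX ring slots), but a written value now fans out to every queue while the read side reports queue 0, on the assumption that the per-queue targets never diverge. For example, assuming the interface is named eth0, writing 256 to /sys/class/net/eth0/rxbuf_min raises rx_min_target (and rx_max_target/rx_target where needed) on each queue in turn and immediately tops up that queue's ring via xennet_alloc_rx_buffers(). When no queues exist yet, the show handlers fall back to the compile-time defaults (or 0 for the current target) instead of dereferencing a missing array.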
@@ -2043,6 +2316,9 @@ static const struct xenbus_device_id netfront_ids[] = {
 static int xennet_remove(struct xenbus_device *dev)
 {
         struct netfront_info *info = dev_get_drvdata(&dev->dev);
+        unsigned int num_queues = info->netdev->real_num_tx_queues;
+        struct netfront_queue *queue = NULL;
+        unsigned int i = 0;
 
         dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
@@ -2052,7 +2328,15 @@ static int xennet_remove(struct xenbus_device *dev)
 
         unregister_netdev(info->netdev);
 
-        del_timer_sync(&info->rx_refill_timer);
+        for (i = 0; i < num_queues; ++i) {
+                queue = &info->queues[i];
+                del_timer_sync(&queue->rx_refill_timer);
+        }
+
+        if (num_queues) {
+                kfree(info->queues);
+                info->queues = NULL;
+        }
 
         free_percpu(info->stats);
 
@@ -2078,6 +2362,9 @@ static int __init netif_init(void)
 
         pr_info("Initialising Xen virtual ethernet driver\n");
 
+        /* Allow as many queues as there are CPUs, by default */
+        xennet_max_queues = num_online_cpus();
+
         return xenbus_register_frontend(&netfront_driver);
 }
 module_init(netif_init);
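Seeding xennet_max_queues in netif_init() gives each guest at most one queue pair per online VCPU by default; since this runs before any frontend is probed, every netdev is then allocated with that many TX queues by alloc_etherdev_mq() in xennet_create_dev(). The ceiling can be changed at load time through the driver's max_queues module parameter, declared earlier in this diff outside this excerpt (e.g. modprobe xen-netfront max_queues=2), and the per-device count is still negotiated down to whatever each backend supports.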